Fields (all strings; min-max lengths):

comment: 1 - 5.49k
method_body: 27 - 75.2k
target_code: 0 - 5.16k
method_body_after: 27 - 76k
context_before: 8 - 252k
context_after: 8 - 253k
comment:

I feel we can skip initializing this here, since it is already initialized during client construction, before `init` runs: https://github.com/Azure/azure-sdk-for-java/blob/ff077373038ff09cf967ddab3d3cf4f0649ca60a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/AsyncDocumentClient.java#L230 -> https://github.com/Azure/azure-sdk-for-java/blob/ff077373038ff09cf967ddab3d3cf4f0649ca60a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentClientImpl.java#L373

So the fix would simply be to delete the line that initializes `queryPlanCache` here:

```suggestion
```
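To make the redundancy concrete, here is a minimal sketch (the hypothetical `ToyClient` stands in for `RxDocumentClientImpl`, and a plain synchronized `HashMap` stands in for `SizeLimitingLRUCache`): re-assigning the cache in `init` repeats the constructor's work, and would also discard anything cached in between.

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Toy stand-in for the client (hypothetical names, for illustration only).
class ToyClient {
    Map<String, String> queryPlanCache;

    ToyClient() {
        // Mirrors the constructor-time initialization the links above point to.
        this.queryPlanCache = Collections.synchronizedMap(new HashMap<>());
    }

    void init(boolean reinitializeCache) {
        if (reinitializeCache) {
            // The line this review suggests deleting: it replaces the map created
            // in the constructor, dropping anything cached in the meantime.
            this.queryPlanCache = Collections.synchronizedMap(new HashMap<>());
        }
    }

    public static void main(String[] args) {
        ToyClient client = new ToyClient();
        client.queryPlanCache.put("SELECT * FROM c", "cached-plan");
        client.init(true);
        // Prints "null": the redundant re-initialization lost the cached entry.
        System.out.println(client.queryPlanCache.get("SELECT * FROM c"));
    }
}
```

In practice `init` runs immediately after construction, so the visible cost is mostly a redundant allocation, but the sketch shows why the constructor's instance is the one to keep.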
method_body:

public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
    try {
        this.httpClientInterceptor = httpClientInterceptor;
        if (httpClientInterceptor != null) {
            this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
        }

        this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode,
            this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType);
        this.globalEndpointManager.init();
        this.initializeGatewayConfigurationReader();

        if (metadataCachesSnapshot != null) {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy,
                metadataCachesSnapshot.getCollectionInfoByNameCache(),
                metadataCachesSnapshot.getCollectionInfoByIdCache());
        } else {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy);
        }
        this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);

        this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache);
        updateGatewayProxy();

        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
            this.storeModel = this.gatewayProxy;
        } else {
            this.initializeDirectConnectivity();
        }

        clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(),
            ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(),
            connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(),
            null, null, this.reactorHttpClient, connectionPolicy.isClientTelemetryEnabled(), this,
            this.connectionPolicy.getPreferredRegions());
        clientTelemetry.init();
        this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE));
        this.retryPolicy.setRxCollectionCache(this.collectionCache);
    } catch (Exception e) {
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
target_code:

this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE));
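For context on the line itself: `SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE)` bounds how many query plans are cached. A minimal sketch of one common way to get that behavior in Java, via an access-ordered `LinkedHashMap` (an illustration of the idea, not the SDK's actual implementation):

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: a size-bounded LRU map in the spirit of the SDK's
// SizeLimitingLRUCache. Access-order iteration plus removeEldestEntry gives
// least-recently-used eviction once maxEntries is exceeded.
class LruSketch<K, V> extends LinkedHashMap<K, V> {
    private final int maxEntries;

    LruSketch(int maxEntries) {
        super(16, 0.75f, true); // true = access order, required for LRU behavior
        this.maxEntries = maxEntries;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > maxEntries;
    }
}
```

Wrapping such an instance in `Collections.synchronizedMap(...)`, as the target line does, is what makes the access-ordered map safe to share across request threads, since even `get` mutates the ordering.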
method_body_after:

public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
    try {
        this.httpClientInterceptor = httpClientInterceptor;
        if (httpClientInterceptor != null) {
            this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
        }

        this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode,
            this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType);
        this.globalEndpointManager.init();
        this.initializeGatewayConfigurationReader();

        if (metadataCachesSnapshot != null) {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy,
                metadataCachesSnapshot.getCollectionInfoByNameCache(),
                metadataCachesSnapshot.getCollectionInfoByIdCache());
        } else {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy);
        }
        this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);

        this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache);
        updateGatewayProxy();

        clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(),
            ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(),
            connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(),
            null, null, this.reactorHttpClient, connectionPolicy.isClientTelemetryEnabled(), this,
            this.connectionPolicy.getPreferredRegions());
        clientTelemetry.init();

        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
            this.storeModel = this.gatewayProxy;
        } else {
            this.initializeDirectConnectivity();
        }

        this.retryPolicy.setRxCollectionCache(this.collectionCache);
    } catch (Exception e) {
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
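Relative to method_body above, method_body_after drops the `queryPlanCache` assignment and also moves the `clientTelemetry` setup ahead of the connection-mode selection. If a cheap guard against the cache regression is wanted, an identity check around `init` would do; this sketch reuses the hypothetical `ToyClient` from the earlier example (a real test would go through the SDK's own test infrastructure):

```java
// Sketch only: asserts the property the fix preserves, namely that init()
// keeps the queryPlanCache instance created by the constructor.
// Assumes ToyClient from the earlier sketch is in the same package.
public class CachePreservedCheck {
    public static void main(String[] args) {
        ToyClient client = new ToyClient();
        java.util.Map<String, String> constructed = client.queryPlanCache;
        client.init(false); // fixed behavior: no re-initialization inside init()
        if (constructed != client.queryPlanCache) {
            throw new AssertionError("init() replaced the constructor's queryPlanCache");
        }
        System.out.println("ok: cache instance preserved across init()");
    }
}
```

context_before / context_after (surrounding class source, flattened):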
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private ApiType apiType; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, 
StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.getAndDecrement(); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); 
hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default" }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE)); this.apiType = apiType; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { return BridgeInternal.createCosmosDiagnostics(this, this.globalEndpointManager); } private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https:"); throw new RuntimeException("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https:"); }
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); } private void updateGatewayProxy() { ((RxGatewayStoreModel)this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); ((RxGatewayStoreModel)this.gatewayProxy).setCollectionCache(this.collectionCache); ((RxGatewayStoreModel)this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); ((RxGatewayStoreModel)this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled ); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return
globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId), invalidPartitionExceptionRetryPolicy); } private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal( String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId) { Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) { queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; return iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, CosmosQueryRequestOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf('#') != -1) { throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String,
String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if(options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } 
if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null && options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (objectDoc instanceof ObjectNode) { internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc); } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = 
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } public static PartitionKeyInternal extractPartitionKeyValueFromDocument( InternalObjectNode document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { switch (partitionKeyDefinition.getKind()) { case HASH: String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts); if (value == null || value.getClass() == ObjectNode.class) { value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } break; case MULTI_HASH: Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()]; for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){ String partitionPath = partitionKeyDefinition.getPaths().get(pathIter); List<String> partitionPathParts = PathParser.getPathParts(partitionPath); partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts); } return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false); default: throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind()); } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server 
request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError())); request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size()); return request; } private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if (this.apiType != null) { request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString()); } if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (RequestVerb.PATCH.equals(httpMethod) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics); if (this.requiresFeedRangeFiltering(request)) { return request.getFeedRange() .populateFeedRangeFilteringHeaders( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)) .flatMap(this::populateAuthorizationHeader); } return this.populateAuthorizationHeader(request); } private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) { if (request.getResourceType() != ResourceType.Document && request.getResourceType() != ResourceType.Conflict) { return false; } switch (request.getOperationType()) { case ReadFeed: case Query: case SqlQuery: return request.getFeedRange() != null; default: return false; } } @Override public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) { if (request == null) { throw new IllegalArgumentException("request"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return request; }); } else { return Mono.just(request); } } @Override public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) { if (httpHeaders == 
null) { throw new IllegalArgumentException("httpHeaders"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return httpHeaders; }); } return Mono.just(httpHeaders); } @Override public AuthorizationTokenType getAuthorizationTokenType() { return this.authorizationTokenType; } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, RequestVerb requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.cosmosAuthorizationTokenResolver != null) { return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(), properties != null ? Collections.unmodifiableMap(properties) : null); } else if (credential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString()); if (cosmosResourceType == null) { return CosmosResourceType.SYSTEM; } return cosmosResourceType; } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> 
replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PATCH) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } @Override public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance); } private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
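// Upsert reuses the create-request factory with OperationType.Upsert; the upsert(...) helper above then sets the HttpConstants.HttpHeaders.IS_UPSERT header before dispatching to the store proxy.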
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert); Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs.flatMap(req -> patch(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs); return requestObs.flatMap(req -> this .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> { return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, CosmosQueryRequestOptions options, Class<T> klass) { String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } itemIdentityList .forEach(itemIdentity -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap; rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); return createReadManyQuery( resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), options, Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); double requestCharge = 0; for (FeedResponse<Document> page : feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponse(finalList, headers); return frp; }); }); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } return rangeQueryMap; } private 
SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame( List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); if (!Objects.equals(idValue, pkValue)) { continue; } parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]") .collect(Collectors.joining()); } private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum); return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { if (operationContextAndListenerTuple == null) { return RxDocumentClientImpl.this.query(request).single(); } else { final OperationListener listener = operationContextAndListenerTuple.getOperationListener(); final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext(); request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId()); listener.requestListener(operationContext, request); return RxDocumentClientImpl.this.query(request).single().doOnNext( response -> listener.responseListener(operationContext, response) ).doOnError( ex -> listener.exceptionListener(operationContext, ex) ); } } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, Document.class, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<Document>> readAllDocuments( String collectionLink, PartitionKey partitionKey, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey == null) { throw new 
IllegalArgumentException("partitionKey"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); String pkSelector = createPkSelector(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); final CosmosQueryRequestOptions effectiveOptions = ModelBridgeInternal.createQueryRequestOptions(options); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( resourceLink, querySpec, ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()), Document.class, ResourceType.Document, queryClient, activityId); }); }, invalidPartitionExceptionRetryPolicy); }); } @Override public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, List<Object> procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } @Override public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); } private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options)) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } } @Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String 
collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. 
conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. 
clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (clientEncryptionKey 
== null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, CosmosQueryRequestOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new CosmosQueryRequestOptions(); } Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int maxPageSize = maxItemCount != null ? maxItemCount : -1; final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options; DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), retryPolicy); return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } @Override public DatabaseAccount getLatestDatabaseAccount() { return this.globalEndpointManager.getLatestDatabaseAccount(); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), 
e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through the gateway even when the client connectivity mode is direct. * * @param request the request being dispatched * @return the gateway store model or the direct store model, depending on the resource and operation type */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", this.clientId); if (!closed.getAndSet(true)) { logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); 
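// Each shared resource below is released via closeQuietly, which catches and logs failures so that one failing close does not prevent the remaining resources from being released.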
LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); this.storeModel.enableThroughputControl(throughputControlStore); } this.throughputControlStore.enableThroughputControlGroup(group); } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) { logger.debug("getFeedRange collectionLink=[{}]", collectionLink); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( 
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } }
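// Illustrative sketch only (not part of the SDK source): one way a caller could consume the
// FeedRange list produced by getFeedRanges above. The collection link below is a hypothetical
// example value; the client handle would come from the AsyncDocumentClient builder.
//
//     AsyncDocumentClient client = ...; // obtained from the AsyncDocumentClient builder
//     client.getFeedRanges("dbs/sampleDb/colls/sampleColl")
//           .flatMapMany(Flux::fromIterable)                       // Mono<List<FeedRange>> -> Flux<FeedRange>
//           .subscribe(feedRange -> System.out.println("feed range: " + feedRange));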
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private ApiType apiType; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: allows specifying the compatibility mode the client uses when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, 
StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map, with map key [{}], partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if (this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.getAndIncrement(); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); 
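// The raw master key has been wrapped in an AzureKeyCredential above, so the key-based authorization below follows the same BaseAuthorizationTokenProvider path as an explicitly supplied credential.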
hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default" }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE)); this.apiType = apiType; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { return BridgeInternal.createCosmosDiagnostics(this, this.globalEndpointManager); } private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https://aka.ms/cosmosdb-tsg-service-unavailable-java"); throw new RuntimeException("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https://aka.ms/cosmosdb-tsg-service-unavailable-java"); } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }
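The AAD branch in the constructor derives a single OAuth scope from the account endpoint and wraps the credential in a SimpleTokenCache so tokens are fetched lazily and reused across requests. A sketch of the scope derivation plus a toy memoizing cache (illustrative only; the real SimpleTokenCache also handles proactive refresh and de-duplicates concurrent fetches):

```java
import java.net.URI;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

public final class AadScopeSketch {

    // The scope the client derives from its endpoint: "<scheme>://<host>/.default".
    static String defaultScope(URI serviceEndpoint) {
        return serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default";
    }

    // Toy stand-in for SimpleTokenCache: fetch once, then reuse.
    static <T> Supplier<T> memoize(Supplier<T> fetch) {
        AtomicReference<T> cached = new AtomicReference<>();
        return () -> {
            T value = cached.get();
            if (value == null) {
                cached.compareAndSet(null, fetch.get());
                value = cached.get();
            }
            return value;
        };
    }

    public static void main(String[] args) {
        System.out.println(defaultScope(URI.create("https://myaccount.documents.azure.com:443/")));
        // -> https://myaccount.documents.azure.com/.default
        Supplier<String> token = memoize(() -> "token-" + System.nanoTime());
        System.out.println(token.get().equals(token.get())); // true: fetched once
    }
}
```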
private void updateGatewayProxy() { ((RxGatewayStoreModel)this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); ((RxGatewayStoreModel)this.gatewayProxy).setCollectionCache(this.collectionCache); ((RxGatewayStoreModel)this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); ((RxGatewayStoreModel)this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry ); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return
globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId), invalidPartitionExceptionRetryPolicy); } private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal( String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId) { Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) { queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; return iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, CosmosQueryRequestOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf('#') != -1) { throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String,
String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if(options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } 
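getRequestHeaders resolves each header from the most specific source available: per-request options override the client-wide defaults, and the Prefer: return=minimal header is only attached to document writes when content responses are disabled. A compact sketch of that precedence (header names hardcoded here for illustration; the SDK reads them from HttpConstants):

```java
import java.util.HashMap;
import java.util.Map;

public final class RequestHeaderSketch {

    // Per-request option (may be null) beats the client-wide default, and only document
    // writes get "Prefer: return=minimal".
    static Map<String, String> writeHeaders(boolean clientContentResponseOnWrite,
                                            Boolean perRequestContentResponseOnWrite,
                                            boolean isDocumentWrite) {
        Map<String, String> headers = new HashMap<>();
        boolean contentResponseOnWrite = perRequestContentResponseOnWrite != null
            ? perRequestContentResponseOnWrite
            : clientContentResponseOnWrite;
        if (!contentResponseOnWrite && isDocumentWrite) {
            headers.put("Prefer", "return=minimal");
        }
        return headers;
    }

    public static void main(String[] args) {
        System.out.println(writeHeaders(true, Boolean.FALSE, true)); // {Prefer=return=minimal}
        System.out.println(writeHeaders(false, null, false));        // {}
    }
}
```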
if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null && options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (objectDoc instanceof ObjectNode) { internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc); } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = 
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } public static PartitionKeyInternal extractPartitionKeyValueFromDocument( InternalObjectNode document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { switch (partitionKeyDefinition.getKind()) { case HASH: String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts); if (value == null || value.getClass() == ObjectNode.class) { value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } break; case MULTI_HASH: Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()]; for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){ String partitionPath = partitionKeyDefinition.getPaths().get(pathIter); List<String> partitionPathParts = PathParser.getPathParts(partitionPath); partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts); } return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false); default: throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind()); } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server 
request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError())); request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size()); return request; } private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if (this.apiType != null) { request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString()); } if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (RequestVerb.PATCH.equals(httpMethod) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics); if (this.requiresFeedRangeFiltering(request)) { return request.getFeedRange() .populateFeedRangeFilteringHeaders( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)) .flatMap(this::populateAuthorizationHeader); } return this.populateAuthorizationHeader(request); } private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) { if (request.getResourceType() != ResourceType.Document && request.getResourceType() != ResourceType.Conflict) { return false; } switch (request.getOperationType()) { case ReadFeed: case Query: case SqlQuery: return request.getFeedRange() != null; default: return false; } } @Override public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) { if (request == null) { throw new IllegalArgumentException("request"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return request; }); } else { return Mono.just(request); } } @Override public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) { if (httpHeaders == 
null) { throw new IllegalArgumentException("httpHeaders"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return httpHeaders; }); } return Mono.just(httpHeaders); } @Override public AuthorizationTokenType getAuthorizationTokenType() { return this.authorizationTokenType; } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, RequestVerb requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.cosmosAuthorizationTokenResolver != null) { return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(), properties != null ? Collections.unmodifiableMap(properties) : null); } else if (credential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString()); if (cosmosResourceType == null) { return CosmosResourceType.SYSTEM; } return cosmosResourceType; } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> 
replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PATCH) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } @Override public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance); }
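All of these verb helpers share one shape: populateHeaders returns a Mono of the request (header population can itself be asynchronous, e.g. for feed-range resolution or AAD token acquisition), so the store call must be chained with flatMap rather than invoked alongside it. A minimal model of the pattern (toy Request type, not the SDK's):

```java
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import reactor.core.publisher.Mono;

public final class ReactiveHeaderSketch {

    record Request(String verb, Map<String, String> headers) {}

    // Header population is modeled as deferred work, like populateHeaders above.
    static Mono<Request> populateHeaders(Request request) {
        return Mono.fromSupplier(() -> {
            request.headers().put("x-ms-date", Instant.now().toString());
            return request;
        });
    }

    // Dispatch only after headers exist. Calling populateHeaders without chaining its
    // Mono would silently send a request with no date/authorization headers.
    static Mono<String> send(Request request) {
        return populateHeaders(request)
            .flatMap(populated -> Mono.just(populated.verb() + " with " + populated.headers().keySet()));
    }

    public static void main(String[] args) {
        send(new Request("PATCH", new HashMap<>())).subscribe(System.out::println);
    }
}
```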
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert); Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document.
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs.flatMap(req -> patch(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs); return requestObs.flatMap(req -> this .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> { return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, CosmosQueryRequestOptions options, Class<T> klass) { String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } itemIdentityList .forEach(itemIdentity -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap; rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); return createReadManyQuery( resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), options, Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); double requestCharge = 0; for (FeedResponse<Document> page : feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponse(finalList, headers); return frp; }); }); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } return rangeQueryMap; } private 
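// Editor note: readMany generates one of two query shapes per partition key range. When the
// container's partition key path is "/id" (partitionKeySelector equals ["id"]), id and partition
// key coincide, so the compact IN form built below suffices, e.g. (illustrative):
//   SELECT * FROM c WHERE c.id IN ( @param0, @param1 )
// Otherwise createReadManyQuerySpec emits one (id, pk) conjunct per identity; for a hypothetical
// "/pk" partition key path the generated text would look like:
//   SELECT * FROM c WHERE ( (c.id = @param1 AND  c["pk"] = @param0 ) OR ... )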
SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame( List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); if (!Objects.equals(idValue, pkValue)) { continue; } parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]") .collect(Collectors.joining()); } private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); Flux<? 
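// Editor note: the factory call below wires the per-range query map into read-many execution
// contexts; flatMap(executeAsync) then fans the work out across the ranges and surfaces the
// combined results as a single Flux of feed pages.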
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum); return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { if (operationContextAndListenerTuple == null) { return RxDocumentClientImpl.this.query(request).single(); } else { final OperationListener listener = operationContextAndListenerTuple.getOperationListener(); final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext(); request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId()); listener.requestListener(operationContext, request); return RxDocumentClientImpl.this.query(request).single().doOnNext( response -> listener.responseListener(operationContext, response) ).doOnError( ex -> listener.exceptionListener(operationContext, ex) ); } } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, Document.class, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<Document>> readAllDocuments( String collectionLink, PartitionKey partitionKey, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey == null) { throw new 
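// Editor note (readAllDocuments): for a logical-partition scan the supplied partition key is
// mapped to its effective partition key string, the owning range id is pinned on the query
// options via setPartitionKeyRangeIdInternal, and the whole query is wrapped in an
// InvalidPartitionExceptionRetryPolicy so a stale routing map triggers a refresh and retry.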
IllegalArgumentException("partitionKey"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); String pkSelector = createPkSelector(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); final CosmosQueryRequestOptions effectiveOptions = ModelBridgeInternal.createQueryRequestOptions(options); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( resourceLink, querySpec, ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()), Document.class, ResourceType.Document, queryClient, activityId); }); }, invalidPartitionExceptionRetryPolicy); }); } @Override public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); RxDocumentServiceRequest request = 
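// Editor note: the stored procedure and UDF request builders here follow one pattern: validate
// the resource, join the collection link with the fixed path segment
// (Paths.STORED_PROCEDURES_PATH_SEGMENT / Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT), derive
// headers from the RequestOptions plus resource/operation type, and hand the typed body to
// RxDocumentServiceRequest.create.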
RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, List<Object> procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = 
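// Editor note: every public operation in this class follows the same retry template: obtain a
// fresh DocumentClientRetryPolicy from resetSessionTokenRetryPolicy, then run the *Internal
// method through ObservableHelper.inlineIfPossibleAsObs, so the first attempt can run without an
// extra scheduler hop while subsequent attempts are driven by the policy.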
this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } @Override public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); } private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options)) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } } @Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String 
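// Editor note: the trigger and UDF *Internal methods share the create/upsert/replace/delete/read
// shape used for stored procedures above: build the request, let the retry policy observe it via
// onBeforeSendRequest, dispatch, and map the RxDocumentServiceResponse back to a typed
// ResourceResponse with toResourceResponse.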
collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
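// Editor note: unlike the server-side programmability resources above, conflict reads and
// deletes are partition-scoped, so the request is routed through addPartitionKeyInformation
// before dispatch; onBeforeSendRequest is deferred into the flatMap so it observes the request
// that actually carries the partition key header.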
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. 
conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. 
clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (clientEncryptionKey 
== null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, CosmosQueryRequestOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new CosmosQueryRequestOptions(); } Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int maxPageSize = maxItemCount != null ? maxItemCount : -1; final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options; DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), retryPolicy); return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } @Override public DatabaseAccount getLatestDatabaseAccount() { return this.globalEndpointManager.getLatestDatabaseAccount(); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), 
e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", this.clientId); if (!closed.getAndSet(true)) { logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); 
LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); this.storeModel.enableThroughputControl(throughputControlStore); } this.throughputControlStore.enableThroughputControlGroup(group); } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) { logger.debug("getFeedRange collectionLink=[{}]", collectionLink); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( 
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } }
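For illustration of createLogicalPartitionScanQuerySpec above: it splices the partition-key selector directly after the document alias `c`, and binds the partition key value as a parameter rather than inlining it. Assuming a hypothetical selector string `["pk"]` and partition key value `"abc"` (both made up for this sketch), the generated spec is equivalent to:

```java
// Hypothetical equivalent of the spec built by createLogicalPartitionScanQuerySpec
// for selector "[\"pk\"]" and partition key value "abc".
SqlQuerySpec spec = new SqlQuerySpec(
    "SELECT * FROM c WHERE c[\"pk\"] = @pkValue",   // query text after concatenation
    new SqlParameter("@pkValue", "abc"));           // value bound as a parameter, not inlined
```

Binding the value as @pkValue is what keeps the logical-partition scan safe for arbitrary partition key values.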
Looks like we are setting timeSinceEnqueued twice? If attributes.get(KAFKA_RECORD_QUEUE_TIME_MS) is not null, the code overwrites the value already computed from AZURE_SDK_ENQUEUED_TIME. Does KAFKA_RECORD_QUEUE_TIME_MS have higher precedence? (See the sketch below for one way to make the precedence explicit.)
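For illustration, a minimal sketch of one way to make the precedence explicit, assuming the value derived from AZURE_SDK_ENQUEUED_TIME should win when both attributes are present (the guard itself is hypothetical, not a confirmed fix; putIfAbsent keeps the first write):

```java
// Hypothetical guard: only record the Kafka queue time when nothing was
// already computed from AZURE_SDK_ENQUEUED_TIME above.
Long kafkaQueueTimeMs = attributes.get(KAFKA_RECORD_QUEUE_TIME_MS);
if (kafkaQueueTimeMs != null) {
    if (data.getMeasurements() == null) {
        data.setMeasurements(new HashMap<>());
    }
    data.getMeasurements().putIfAbsent("timeSinceEnqueued", (double) kafkaQueueTimeMs);
}
```

If the intent is the opposite (the Kafka value wins), keeping put but documenting the precedence would also answer the question.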
private void export(SpanData span, List<TelemetryItem> telemetryItems) { SpanKind kind = span.getKind(); String instrumentationName = span.getInstrumentationLibraryInfo().getName(); if (kind == SpanKind.INTERNAL) { if (instrumentationName.startsWith("io.opentelemetry.spring-scheduling-") && !span.getParentSpanContext().isValid()) { exportRequest(span, telemetryItems); } else { exportRemoteDependency(span, true, telemetryItems); } } else if (kind == SpanKind.CLIENT || kind == SpanKind.PRODUCER) { exportRemoteDependency(span, false, telemetryItems); } else if (kind == SpanKind.CONSUMER && "receive".equals(span.getAttributes().get(SemanticAttributes.MESSAGING_OPERATION))) { exportRemoteDependency(span, false, telemetryItems); } else if (kind == SpanKind.SERVER || kind == SpanKind.CONSUMER) { exportRequest(span, telemetryItems); } else { throw LOGGER.logExceptionAsError(new UnsupportedOperationException(kind.name())); } } private void exportRemoteDependency(SpanData span, boolean inProc, List<TelemetryItem> telemetryItems) { TelemetryItem telemetry = new TelemetryItem(); RemoteDependencyData data = new RemoteDependencyData(); initTelemetry(telemetry, data, "RemoteDependency", "RemoteDependencyData"); data.setProperties(new HashMap<>()); float samplingPercentage = 100; setOperationTags(telemetry, span); setTime(telemetry, span.getStartEpochNanos()); setExtraAttributes(telemetry, data.getProperties(), span.getAttributes()); addLinks(data.getProperties(), span.getLinks()); data.setId(span.getSpanId()); data.setName(getDependencyName(span)); data.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); data.setSuccess(getSuccess(span)); if (inProc) { data.setType("InProc"); } else { applySemanticConventions(span, data); } telemetryItems.add(telemetry); exportEvents(span, null, telemetryItems); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( Arrays.asList( "HTTP OPTIONS", "HTTP GET", "HTTP HEAD", "HTTP POST", "HTTP PUT", "HTTP DELETE", "HTTP TRACE", "HTTP CONNECT", "HTTP PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = span.getAttributes().get(SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = span.getAttributes().get(SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPathFromUrl(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions(SpanData span, RemoteDependencyData remoteDependencyData) { Attributes attributes = span.getAttributes(); String httpMethod = attributes.get(SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(attributes, remoteDependencyData); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(attributes, remoteDependencyData, rpcSystem); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem != null) { applyDatabaseClientSpan(attributes, remoteDependencyData, dbSystem); return; } String azureNamespace = attributes.get(AZURE_NAMESPACE); if ("Microsoft.EventHub".equals(azureNamespace)) { applyEventHubsSpan(attributes, remoteDependencyData); return; } if ("Microsoft.ServiceBus".equals(azureNamespace)) { applyServiceBusSpan(attributes, remoteDependencyData); return; } String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM); if (messagingSystem != null) { applyMessagingClientSpan(attributes, remoteDependencyData, messagingSystem, span.getKind()); return; } String target = getTargetFromPeerAttributes(attributes, Integer.MAX_VALUE); if (target != null) { remoteDependencyData.setTarget(target); return; } remoteDependencyData.setType("InProc"); } private static void setOperationTags(TelemetryItem telemetry, SpanData span) { setOperationId(telemetry, span.getTraceId()); setOperationParentId(telemetry, span.getParentSpanContext().getSpanId()); setOperationName(telemetry, span.getAttributes()); } private static void setOperationId(TelemetryItem telemetry, String traceId) { telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId(TelemetryItem telemetry, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetry.getTags().put(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName(TelemetryItem telemetry, Attributes attributes) { String operationName = attributes.get(AI_OPERATION_NAME_KEY); if (operationName != null) { setOperationName(telemetry, operationName); } } private static void setOperationName(TelemetryItem telemetry, String operationName) { telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan(Attributes attributes, RemoteDependencyData telemetry) { String target = getTargetForHttpClientSpan(attributes); telemetry.setType("Http"); telemetry.setTarget(target); Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetry.setResultCode(Long.toString(httpStatusCode)); } String url = attributes.get(SemanticAttributes.HTTP_URL); telemetry.setData(url); } private static String getTargetForHttpClientSpan(Attributes attributes) { String target = getTargetFromPeerService(attributes); if (target != null) { return target; } target = attributes.get(SemanticAttributes.HTTP_HOST); if (target != null) { String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if ("http".equals(scheme)) { if (target.endsWith(":80")) { target = target.substring(0, target.length() - 3); } } else if ("https".equals(scheme)) { if (target.endsWith(":443")) { target = target.substring(0, target.length() - 4); } } return target; } String url = attributes.get(SemanticAttributes.HTTP_URL); if (url != null) { target = UrlParser.getTargetFromUrl(url); if (target != null) 
{ return target; } } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); int defaultPort; if ("http".equals(scheme)) { defaultPort = 80; } else if ("https".equals(scheme)) { defaultPort = 443; } else { defaultPort = 0; } target = getTargetFromNetAttributes(attributes, defaultPort); if (target != null) { return target; } return "Http"; } @Nullable private static String getTargetFromPeerAttributes(Attributes attributes, int defaultPort) { String target = getTargetFromPeerService(attributes); if (target != null) { return target; } return getTargetFromNetAttributes(attributes, defaultPort); } @Nullable private static String getTargetFromPeerService(Attributes attributes) { return attributes.get(SemanticAttributes.PEER_SERVICE); } @Nullable private static String getTargetFromNetAttributes(Attributes attributes, int defaultPort) { String target = getHostFromNetAttributes(attributes); if (target == null) { return null; } Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); if (port != null && port != defaultPort) { return target + ":" + port; } return target; } @Nullable private static String getHostFromNetAttributes(Attributes attributes) { String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { return host; } return attributes.get(SemanticAttributes.NET_PEER_IP); } private static void applyRpcClientSpan(Attributes attributes, RemoteDependencyData telemetry, String rpcSystem) { telemetry.setType(rpcSystem); String target = getTargetFromPeerAttributes(attributes, 0); if (target == null) { target = rpcSystem; } telemetry.setTarget(target); } private static void applyDatabaseClientSpan(Attributes attributes, RemoteDependencyData telemetry, String dbSystem) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else { type = dbSystem; } telemetry.setType(type); telemetry.setData(dbStatement); String target = nullAwareConcat( getTargetFromPeerAttributes(attributes, getDefaultPortForDbSystem(dbSystem)), attributes.get(SemanticAttributes.DB_NAME), " | "); if (target == null) { target = dbSystem; } telemetry.setTarget(target); } private static void applyMessagingClientSpan(Attributes attributes, RemoteDependencyData telemetry, String messagingSystem, SpanKind spanKind) { if (spanKind == SpanKind.PRODUCER) { telemetry.setType("Queue Message | " + messagingSystem); } else { telemetry.setType(messagingSystem); } String destination = attributes.get(SemanticAttributes.MESSAGING_DESTINATION); if (destination != null) { telemetry.setTarget(destination); } else { telemetry.setTarget(messagingSystem); } } private static void applyEventHubsSpan(Attributes attributes, RemoteDependencyData telemetry) { telemetry.setType("Microsoft.EventHub"); telemetry.setTarget(getAzureSdkTargetSource(attributes)); } private static void applyServiceBusSpan(Attributes attributes, RemoteDependencyData telemetry) { telemetry.setType("AZURE SERVICE BUS"); telemetry.setTarget(getAzureSdkTargetSource(attributes)); } private static String getAzureSdkTargetSource(Attributes attributes) { String peerAddress = attributes.get(AZURE_SDK_PEER_ADDRESS); String destination = attributes.get(AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } private static int getDefaultPortForDbSystem(String dbSystem) 
{ switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return 0; } } private void exportRequest(SpanData span, List<TelemetryItem> telemetryItems) { TelemetryItem telemetry = new TelemetryItem(); RequestData data = new RequestData(); initTelemetry(telemetry, data, "Request", "RequestData"); data.setProperties(new HashMap<>()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); float samplingPercentage = 100; data.setId(span.getSpanId()); setTime(telemetry, startEpochNanos); setExtraAttributes(telemetry, data.getProperties(), attributes); addLinks(data.getProperties(), span.getLinks()); String operationName = getOperationName(span); telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); telemetry .getTags() .put( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); data.setName(operationName); data.setDuration(FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); data.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { data.setUrl(httpUrl); } Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { data.setResponseCode(Long.toString(httpStatusCode)); } else { data.setResponseCode("0"); } String locationIp = attributes.get(SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_PEER_IP); } if (locationIp != null) { telemetry.getTags().put(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } data.setSource(getSource(attributes)); Long enqueuedTime = attributes.get(AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); if (data.getMeasurements() == null) { data.setMeasurements(new HashMap<>()); } data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { if (data.getMeasurements() == null) { data.setMeasurements(new HashMap<>()); } data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } telemetryItems.add(telemetry); exportEvents(span, operationName, telemetryItems); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: Long statusCode = span.getAttributes().get(SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; default: return true; } } @Nullable private static String 
getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String host = attributes.get(SemanticAttributes.HTTP_HOST); if (host == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } return scheme + "://" + host + target; } private static String getSource(Attributes attributes) { if (isAzureQueue(attributes)) { return getAzureSdkTargetSource(attributes); } String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM); if (messagingSystem != null) { String source = nullAwareConcat( getTargetFromPeerAttributes(attributes, 0), attributes.get(SemanticAttributes.MESSAGING_DESTINATION), "/"); if (source != null) { return source; } return messagingSystem; } return null; } private static boolean isAzureQueue(Attributes attributes) { String azureNamespace = attributes.get(AZURE_NAMESPACE); return "Microsoft.EventHub".equals(azureNamespace) || "Microsoft.ServiceBus".equals(azureNamespace); } private static String getOperationName(SpanData span) { String spanName = span.getName(); String httpMethod = span.getAttributes().get(SemanticAttributes.HTTP_METHOD); if (httpMethod != null && !httpMethod.isEmpty() && spanName.startsWith("/")) { return httpMethod + " " + spanName; } return spanName; } private static String nullAwareConcat(String str1, String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents(SpanData span, @Nullable String operationName, List<TelemetryItem> telemetryItems) { for (EventData event : span.getEvents()) { if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null) { trackException(stacktrace, span, operationName, telemetryItems); } return; } TelemetryItem telemetry = new TelemetryItem(); MessageData data = new MessageData(); initTelemetry(telemetry, data, "Message", "MessageData"); data.setProperties(new HashMap<>()); setOperationId(telemetry, span.getTraceId()); setOperationParentId(telemetry, span.getSpanId()); if (operationName != null) { setOperationName(telemetry, operationName); } else { setOperationName(telemetry, span.getAttributes()); } setTime(telemetry, event.getEpochNanos()); setExtraAttributes(telemetry, data.getProperties(), event.getAttributes()); data.setMessage(event.getName()); telemetryItems.add(telemetry); } } private void trackException(String errorStack, SpanData span, @Nullable String operationName, List<TelemetryItem> telemetryItems) { TelemetryItem telemetry = new TelemetryItem(); TelemetryExceptionData data = new TelemetryExceptionData(); initTelemetry(telemetry, data, "Exception", "ExceptionData"); data.setProperties(new HashMap<>()); setOperationId(telemetry, span.getTraceId()); setOperationParentId(telemetry, span.getSpanId()); if (operationName != null) { setOperationName(telemetry, operationName); } else { setOperationName(telemetry, span.getAttributes()); } setTime(telemetry, span.getEndEpochNanos()); data.setExceptions(Exceptions.minimalParse(errorStack)); telemetryItems.add(telemetry); } private void initTelemetry(TelemetryItem telemetry, MonitorDomain data, String telemetryName, String 
baseType) { telemetry.setVersion(1); telemetry.setName(telemetryName); telemetry.setInstrumentationKey(instrumentationKey); telemetry.setTags(new HashMap<>()); telemetry.getTags().put(ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion()); data.setVersion(2); MonitorBase monitorBase = new MonitorBase(); telemetry.setData(monitorBase); monitorBase.setBaseType(baseType); monitorBase.setBaseData(data); } private static void setTime(TelemetryItem telemetry, long epochNanos) { telemetry.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void addLinks(Map<String, String> properties, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); properties.put("_MS.links", sb.toString()); } private static void setExtraAttributes(TelemetryItem telemetry, Map<String, String> properties, Attributes attributes) { attributes.forEach((key, value) -> { String stringKey = key.getKey(); if (stringKey.equals(AZURE_NAMESPACE.getKey()) || stringKey.equals(AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) || stringKey.equals(AZURE_SDK_ENQUEUED_TIME.getKey())) { return; } if (stringKey.equals(KAFKA_RECORD_QUEUE_TIME_MS.getKey()) || stringKey.equals(KAFKA_OFFSET.getKey())) { return; } if (stringKey.equals(SemanticAttributes.ENDUSER_ID.getKey()) && value instanceof String) { telemetry.getTags().put(ContextTagKeys.AI_USER_ID.toString(), (String) value); return; } if (stringKey.equals(SemanticAttributes.HTTP_USER_AGENT.getKey()) && value instanceof String) { telemetry.getTags().put("ai.user.userAgent", (String) value); return; } int index = stringKey.indexOf("."); String prefix = index == -1 ? stringKey : stringKey.substring(0, index); if (STANDARD_ATTRIBUTE_PREFIXES.contains(prefix)) { return; } String val = convertToString(value, key.getType()); if (val != null) { properties.put(key.getKey(), val); } }); } @Nullable private static String convertToString(Object value, AttributeType type) { switch (type) { case STRING: case BOOLEAN: case LONG: case DOUBLE: return String.valueOf(value); case STRING_ARRAY: case BOOLEAN_ARRAY: case LONG_ARRAY: case DOUBLE_ARRAY: return join((List<?>) value); default: LOGGER.warning("unexpected attribute type: {}", type); return null; } } private static <T> String join(List<T> values) { StringBuilder sb = new StringBuilder(); for (Object val : values) { if (sb.length() > 0) { sb.append(", "); } sb.append(val); } return sb.toString(); } }
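One detail in exportRequest above that is easy to misread: AZURE_SDK_ENQUEUED_TIME is read as epoch seconds while the span start is epoch nanoseconds, so both sides are normalized to milliseconds before subtracting, and Math.max clamps clock skew to zero. A minimal self-contained sketch of the same arithmetic, with made-up timestamps:

```java
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;

public class EnqueuedTimeMathSketch {
    public static void main(String[] args) {
        // Hypothetical values: the span starts 1500 ms after the message was enqueued.
        long enqueuedEpochSeconds = 1_700_000_000L;                               // AZURE_SDK_ENQUEUED_TIME
        long spanStartEpochNanos = (1_700_000_000L * 1_000 + 1_500) * 1_000_000L; // span.getStartEpochNanos()

        // Same normalization as the exporter: nanos -> millis, seconds -> millis.
        long timeSinceEnqueuedMillis = Math.max(
            0L,
            NANOSECONDS.toMillis(spanStartEpochNanos) - SECONDS.toMillis(enqueuedEpochSeconds));

        System.out.println(timeSinceEnqueuedMillis); // prints 1500
    }
}
```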
data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis);
private void export(SpanData span, List<TelemetryItem> telemetryItems) { SpanKind kind = span.getKind(); String instrumentationName = span.getInstrumentationLibraryInfo().getName(); if (kind == SpanKind.INTERNAL) { if (instrumentationName.startsWith("io.opentelemetry.spring-scheduling-") && !span.getParentSpanContext().isValid()) { exportRequest(span, telemetryItems); } else { exportRemoteDependency(span, true, telemetryItems); } } else if (kind == SpanKind.CLIENT || kind == SpanKind.PRODUCER) { exportRemoteDependency(span, false, telemetryItems); } else if (kind == SpanKind.CONSUMER && "receive".equals(span.getAttributes().get(SemanticAttributes.MESSAGING_OPERATION))) { exportRemoteDependency(span, false, telemetryItems); } else if (kind == SpanKind.SERVER || kind == SpanKind.CONSUMER) { exportRequest(span, telemetryItems); } else { throw LOGGER.logExceptionAsError(new UnsupportedOperationException(kind.name())); } } private void exportRemoteDependency(SpanData span, boolean inProc, List<TelemetryItem> telemetryItems) { TelemetryItem telemetry = new TelemetryItem(); RemoteDependencyData data = new RemoteDependencyData(); initTelemetry(telemetry, data, "RemoteDependency", "RemoteDependencyData"); data.setProperties(new HashMap<>()); float samplingPercentage = 100; setOperationTags(telemetry, span); setTime(telemetry, span.getStartEpochNanos()); setExtraAttributes(telemetry, data.getProperties(), span.getAttributes()); addLinks(data.getProperties(), span.getLinks()); data.setId(span.getSpanId()); data.setName(getDependencyName(span)); data.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); data.setSuccess(getSuccess(span)); if (inProc) { data.setType("InProc"); } else { applySemanticConventions(span, data); } telemetryItems.add(telemetry); exportEvents(span, null, telemetryItems); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( Arrays.asList( "HTTP OPTIONS", "HTTP GET", "HTTP HEAD", "HTTP POST", "HTTP PUT", "HTTP DELETE", "HTTP TRACE", "HTTP CONNECT", "HTTP PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = span.getAttributes().get(SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = span.getAttributes().get(SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPathFromUrl(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions(SpanData span, RemoteDependencyData remoteDependencyData) { Attributes attributes = span.getAttributes(); String httpMethod = attributes.get(SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(attributes, remoteDependencyData); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(attributes, remoteDependencyData, rpcSystem); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem != null) { applyDatabaseClientSpan(attributes, remoteDependencyData, dbSystem); return; } String azureNamespace = attributes.get(AZURE_NAMESPACE); if ("Microsoft.EventHub".equals(azureNamespace)) { applyEventHubsSpan(attributes, remoteDependencyData); return; } if ("Microsoft.ServiceBus".equals(azureNamespace)) { applyServiceBusSpan(attributes, remoteDependencyData); return; } String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM); if (messagingSystem != null) { applyMessagingClientSpan(attributes, remoteDependencyData, messagingSystem, span.getKind()); return; } String target = getTargetFromPeerAttributes(attributes, Integer.MAX_VALUE); if (target != null) { remoteDependencyData.setTarget(target); return; } remoteDependencyData.setType("InProc"); } private static void setOperationTags(TelemetryItem telemetry, SpanData span) { setOperationId(telemetry, span.getTraceId()); setOperationParentId(telemetry, span.getParentSpanContext().getSpanId()); setOperationName(telemetry, span.getAttributes()); } private static void setOperationId(TelemetryItem telemetry, String traceId) { telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId(TelemetryItem telemetry, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetry.getTags().put(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName(TelemetryItem telemetry, Attributes attributes) { String operationName = attributes.get(AI_OPERATION_NAME_KEY); if (operationName != null) { setOperationName(telemetry, operationName); } } private static void setOperationName(TelemetryItem telemetry, String operationName) { telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan(Attributes attributes, RemoteDependencyData telemetry) { String target = getTargetForHttpClientSpan(attributes); telemetry.setType("Http"); telemetry.setTarget(target); Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetry.setResultCode(Long.toString(httpStatusCode)); } String url = attributes.get(SemanticAttributes.HTTP_URL); telemetry.setData(url); } private static String getTargetForHttpClientSpan(Attributes attributes) { String target = getTargetFromPeerService(attributes); if (target != null) { return target; } target = attributes.get(SemanticAttributes.HTTP_HOST); if (target != null) { String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if ("http".equals(scheme)) { if (target.endsWith(":80")) { target = target.substring(0, target.length() - 3); } } else if ("https".equals(scheme)) { if (target.endsWith(":443")) { target = target.substring(0, target.length() - 4); } } return target; } String url = attributes.get(SemanticAttributes.HTTP_URL); if (url != null) { target = UrlParser.getTargetFromUrl(url); if (target != null) 
{ return target; } } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); int defaultPort; if ("http".equals(scheme)) { defaultPort = 80; } else if ("https".equals(scheme)) { defaultPort = 443; } else { defaultPort = 0; } target = getTargetFromNetAttributes(attributes, defaultPort); if (target != null) { return target; } return "Http"; } @Nullable private static String getTargetFromPeerAttributes(Attributes attributes, int defaultPort) { String target = getTargetFromPeerService(attributes); if (target != null) { return target; } return getTargetFromNetAttributes(attributes, defaultPort); } @Nullable private static String getTargetFromPeerService(Attributes attributes) { return attributes.get(SemanticAttributes.PEER_SERVICE); } @Nullable private static String getTargetFromNetAttributes(Attributes attributes, int defaultPort) { String target = getHostFromNetAttributes(attributes); if (target == null) { return null; } Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); if (port != null && port != defaultPort) { return target + ":" + port; } return target; } @Nullable private static String getHostFromNetAttributes(Attributes attributes) { String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { return host; } return attributes.get(SemanticAttributes.NET_PEER_IP); } private static void applyRpcClientSpan(Attributes attributes, RemoteDependencyData telemetry, String rpcSystem) { telemetry.setType(rpcSystem); String target = getTargetFromPeerAttributes(attributes, 0); if (target == null) { target = rpcSystem; } telemetry.setTarget(target); } private static void applyDatabaseClientSpan(Attributes attributes, RemoteDependencyData telemetry, String dbSystem) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else { type = dbSystem; } telemetry.setType(type); telemetry.setData(dbStatement); String target = nullAwareConcat( getTargetFromPeerAttributes(attributes, getDefaultPortForDbSystem(dbSystem)), attributes.get(SemanticAttributes.DB_NAME), " | "); if (target == null) { target = dbSystem; } telemetry.setTarget(target); } private static void applyMessagingClientSpan(Attributes attributes, RemoteDependencyData telemetry, String messagingSystem, SpanKind spanKind) { if (spanKind == SpanKind.PRODUCER) { telemetry.setType("Queue Message | " + messagingSystem); } else { telemetry.setType(messagingSystem); } String destination = attributes.get(SemanticAttributes.MESSAGING_DESTINATION); if (destination != null) { telemetry.setTarget(destination); } else { telemetry.setTarget(messagingSystem); } } private static void applyEventHubsSpan(Attributes attributes, RemoteDependencyData telemetry) { telemetry.setType("Microsoft.EventHub"); telemetry.setTarget(getAzureSdkTargetSource(attributes)); } private static void applyServiceBusSpan(Attributes attributes, RemoteDependencyData telemetry) { telemetry.setType("AZURE SERVICE BUS"); telemetry.setTarget(getAzureSdkTargetSource(attributes)); } private static String getAzureSdkTargetSource(Attributes attributes) { String peerAddress = attributes.get(AZURE_SDK_PEER_ADDRESS); String destination = attributes.get(AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } private static int getDefaultPortForDbSystem(String dbSystem) 
{ switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return 0; } } private void exportRequest(SpanData span, List<TelemetryItem> telemetryItems) { TelemetryItem telemetry = new TelemetryItem(); RequestData data = new RequestData(); initTelemetry(telemetry, data, "Request", "RequestData"); data.setProperties(new HashMap<>()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); float samplingPercentage = 100; data.setId(span.getSpanId()); setTime(telemetry, startEpochNanos); setExtraAttributes(telemetry, data.getProperties(), attributes); addLinks(data.getProperties(), span.getLinks()); String operationName = getOperationName(span); telemetry.getTags().put(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetry.getTags().put(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); telemetry .getTags() .put( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); data.setName(operationName); data.setDuration(FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); data.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { data.setUrl(httpUrl); } Long httpStatusCode = attributes.get(SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { data.setResponseCode(Long.toString(httpStatusCode)); } else { data.setResponseCode("0"); } String locationIp = attributes.get(SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_PEER_IP); } if (locationIp != null) { telemetry.getTags().put(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } data.setSource(getSource(attributes)); Long enqueuedTime = attributes.get(AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); if (data.getMeasurements() == null) { data.setMeasurements(new HashMap<>()); } data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { if (data.getMeasurements() == null) { data.setMeasurements(new HashMap<>()); } data.getMeasurements().put("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } telemetryItems.add(telemetry); exportEvents(span, operationName, telemetryItems); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: Long statusCode = span.getAttributes().get(SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; default: return true; } } @Nullable private static String 
getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String host = attributes.get(SemanticAttributes.HTTP_HOST); if (host == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } return scheme + "://" + host + target; } private static String getSource(Attributes attributes) { if (isAzureQueue(attributes)) { return getAzureSdkTargetSource(attributes); } String messagingSystem = attributes.get(SemanticAttributes.MESSAGING_SYSTEM); if (messagingSystem != null) { String source = nullAwareConcat( getTargetFromPeerAttributes(attributes, 0), attributes.get(SemanticAttributes.MESSAGING_DESTINATION), "/"); if (source != null) { return source; } return messagingSystem; } return null; } private static boolean isAzureQueue(Attributes attributes) { String azureNamespace = attributes.get(AZURE_NAMESPACE); return "Microsoft.EventHub".equals(azureNamespace) || "Microsoft.ServiceBus".equals(azureNamespace); } private static String getOperationName(SpanData span) { String spanName = span.getName(); String httpMethod = span.getAttributes().get(SemanticAttributes.HTTP_METHOD); if (httpMethod != null && !httpMethod.isEmpty() && spanName.startsWith("/")) { return httpMethod + " " + spanName; } return spanName; } private static String nullAwareConcat(String str1, String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents(SpanData span, @Nullable String operationName, List<TelemetryItem> telemetryItems) { for (EventData event : span.getEvents()) { if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null) { trackException(stacktrace, span, operationName, telemetryItems); } return; } TelemetryItem telemetry = new TelemetryItem(); MessageData data = new MessageData(); initTelemetry(telemetry, data, "Message", "MessageData"); data.setProperties(new HashMap<>()); setOperationId(telemetry, span.getTraceId()); setOperationParentId(telemetry, span.getSpanId()); if (operationName != null) { setOperationName(telemetry, operationName); } else { setOperationName(telemetry, span.getAttributes()); } setTime(telemetry, event.getEpochNanos()); setExtraAttributes(telemetry, data.getProperties(), event.getAttributes()); data.setMessage(event.getName()); telemetryItems.add(telemetry); } } private void trackException(String errorStack, SpanData span, @Nullable String operationName, List<TelemetryItem> telemetryItems) { TelemetryItem telemetry = new TelemetryItem(); TelemetryExceptionData data = new TelemetryExceptionData(); initTelemetry(telemetry, data, "Exception", "ExceptionData"); data.setProperties(new HashMap<>()); setOperationId(telemetry, span.getTraceId()); setOperationParentId(telemetry, span.getSpanId()); if (operationName != null) { setOperationName(telemetry, operationName); } else { setOperationName(telemetry, span.getAttributes()); } setTime(telemetry, span.getEndEpochNanos()); data.setExceptions(Exceptions.minimalParse(errorStack)); telemetryItems.add(telemetry); } private void initTelemetry(TelemetryItem telemetry, MonitorDomain data, String telemetryName, String
baseType) { telemetry.setVersion(1); telemetry.setName(telemetryName); telemetry.setInstrumentationKey(instrumentationKey); telemetry.setTags(new HashMap<>()); telemetry.getTags().put(ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion()); data.setVersion(2); MonitorBase monitorBase = new MonitorBase(); telemetry.setData(monitorBase); monitorBase.setBaseType(baseType); monitorBase.setBaseData(data); } private static void setTime(TelemetryItem telemetry, long epochNanos) { telemetry.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void addLinks(Map<String, String> properties, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); properties.put("_MS.links", sb.toString()); } private static void setExtraAttributes(TelemetryItem telemetry, Map<String, String> properties, Attributes attributes) { attributes.forEach((key, value) -> { String stringKey = key.getKey(); if (stringKey.equals(AZURE_NAMESPACE.getKey()) || stringKey.equals(AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) || stringKey.equals(AZURE_SDK_ENQUEUED_TIME.getKey())) { return; } if (stringKey.equals(KAFKA_RECORD_QUEUE_TIME_MS.getKey()) || stringKey.equals(KAFKA_OFFSET.getKey())) { return; } if (stringKey.equals(SemanticAttributes.ENDUSER_ID.getKey()) && value instanceof String) { telemetry.getTags().put(ContextTagKeys.AI_USER_ID.toString(), (String) value); return; } if (stringKey.equals(SemanticAttributes.HTTP_USER_AGENT.getKey()) && value instanceof String) { telemetry.getTags().put("ai.user.userAgent", (String) value); return; } int index = stringKey.indexOf("."); String prefix = index == -1 ? stringKey : stringKey.substring(0, index); if (STANDARD_ATTRIBUTE_PREFIXES.contains(prefix)) { return; } String val = convertToString(value, key.getType()); if (val != null) { properties.put(key.getKey(), val); } }); } @Nullable private static String convertToString(Object value, AttributeType type) { switch (type) { case STRING: case BOOLEAN: case LONG: case DOUBLE: return String.valueOf(value); case STRING_ARRAY: case BOOLEAN_ARRAY: case LONG_ARRAY: case DOUBLE_ARRAY: return join((List<?>) value); default: LOGGER.warning("unexpected attribute type: {}", type); return null; } } private static <T> String join(List<T> values) { StringBuilder sb = new StringBuilder(); for (Object val : values) { if (sb.length() > 0) { sb.append(", "); } sb.append(val); } return sb.toString(); } }
class AzureMonitorTraceExporter implements SpanExporter { private static final Set<String> SQL_DB_SYSTEMS; private static final Set<String> STANDARD_ATTRIBUTE_PREFIXES; private static final AttributeKey<String> AI_OPERATION_NAME_KEY = AttributeKey.stringKey("applicationinsights.internal.operation_name"); private static final AttributeKey<String> AZURE_NAMESPACE = AttributeKey.stringKey("az.namespace"); private static final AttributeKey<String> AZURE_SDK_PEER_ADDRESS = AttributeKey.stringKey("peer.address"); private static final AttributeKey<String> AZURE_SDK_MESSAGE_BUS_DESTINATION = AttributeKey.stringKey("message_bus.destination"); private static final AttributeKey<Long> AZURE_SDK_ENQUEUED_TIME = AttributeKey.longKey("x-opt-enqueued-time"); private static final AttributeKey<Long> KAFKA_RECORD_QUEUE_TIME_MS = longKey("kafka.record.queue_time_ms"); private static final AttributeKey<Long> KAFKA_OFFSET = longKey("kafka.offset"); private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorTraceExporter.class); static { Set<String> dbSystems = new HashSet<>(); dbSystems.add(SemanticAttributes.DbSystemValues.DB2); dbSystems.add(SemanticAttributes.DbSystemValues.DERBY); dbSystems.add(SemanticAttributes.DbSystemValues.MARIADB); dbSystems.add(SemanticAttributes.DbSystemValues.MSSQL); dbSystems.add(SemanticAttributes.DbSystemValues.MYSQL); dbSystems.add(SemanticAttributes.DbSystemValues.ORACLE); dbSystems.add(SemanticAttributes.DbSystemValues.POSTGRESQL); dbSystems.add(SemanticAttributes.DbSystemValues.SQLITE); dbSystems.add(SemanticAttributes.DbSystemValues.OTHER_SQL); dbSystems.add(SemanticAttributes.DbSystemValues.HSQLDB); dbSystems.add(SemanticAttributes.DbSystemValues.H2); SQL_DB_SYSTEMS = Collections.unmodifiableSet(dbSystems); Set<String> standardAttributesPrefix = new HashSet<>(); standardAttributesPrefix.add("http"); standardAttributesPrefix.add("db"); standardAttributesPrefix.add("message"); standardAttributesPrefix.add("messaging"); standardAttributesPrefix.add("rpc"); standardAttributesPrefix.add("enduser"); standardAttributesPrefix.add("net"); standardAttributesPrefix.add("peer"); standardAttributesPrefix.add("exception"); standardAttributesPrefix.add("thread"); standardAttributesPrefix.add("faas"); STANDARD_ATTRIBUTE_PREFIXES = Collections.unmodifiableSet(standardAttributesPrefix); } private final MonitorExporterAsyncClient client; private final String instrumentationKey; /** * Creates an instance of exporter that is configured with given exporter client that sends telemetry events to * Application Insights resource identified by the instrumentation key. * * @param client The client used to send data to Azure Monitor. * @param instrumentationKey The instrumentation key of Application Insights resource. 
*/ AzureMonitorTraceExporter(MonitorExporterAsyncClient client, String instrumentationKey) { this.client = client; this.instrumentationKey = instrumentationKey; } /** * {@inheritDoc} */ @Override public CompletableResultCode export(Collection<SpanData> spans) { CompletableResultCode completableResultCode = new CompletableResultCode(); try { List<TelemetryItem> telemetryItems = new ArrayList<>(); for (SpanData span : spans) { LOGGER.verbose("exporting span: {}", span); export(span, telemetryItems); } client.export(telemetryItems) .subscriberContext(Context.of(Tracer.DISABLE_TRACING_KEY, true)) .subscribe(ignored -> { }, error -> completableResultCode.fail(), completableResultCode::succeed); return completableResultCode; } catch (Throwable t) { LOGGER.error(t.getMessage(), t); return completableResultCode.fail(); } } /** * {@inheritDoc} */ @Override public CompletableResultCode flush() { return CompletableResultCode.ofSuccess(); } /** * {@inheritDoc} */ @Override public CompletableResultCode shutdown() { return CompletableResultCode.ofSuccess(); }
class AzureMonitorTraceExporter implements SpanExporter { private static final Set<String> SQL_DB_SYSTEMS; private static final Set<String> STANDARD_ATTRIBUTE_PREFIXES; private static final AttributeKey<String> AI_OPERATION_NAME_KEY = AttributeKey.stringKey("applicationinsights.internal.operation_name"); private static final AttributeKey<String> AZURE_NAMESPACE = AttributeKey.stringKey("az.namespace"); private static final AttributeKey<String> AZURE_SDK_PEER_ADDRESS = AttributeKey.stringKey("peer.address"); private static final AttributeKey<String> AZURE_SDK_MESSAGE_BUS_DESTINATION = AttributeKey.stringKey("message_bus.destination"); private static final AttributeKey<Long> AZURE_SDK_ENQUEUED_TIME = AttributeKey.longKey("x-opt-enqueued-time"); private static final AttributeKey<Long> KAFKA_RECORD_QUEUE_TIME_MS = longKey("kafka.record.queue_time_ms"); private static final AttributeKey<Long> KAFKA_OFFSET = longKey("kafka.offset"); private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorTraceExporter.class); static { Set<String> dbSystems = new HashSet<>(); dbSystems.add(SemanticAttributes.DbSystemValues.DB2); dbSystems.add(SemanticAttributes.DbSystemValues.DERBY); dbSystems.add(SemanticAttributes.DbSystemValues.MARIADB); dbSystems.add(SemanticAttributes.DbSystemValues.MSSQL); dbSystems.add(SemanticAttributes.DbSystemValues.MYSQL); dbSystems.add(SemanticAttributes.DbSystemValues.ORACLE); dbSystems.add(SemanticAttributes.DbSystemValues.POSTGRESQL); dbSystems.add(SemanticAttributes.DbSystemValues.SQLITE); dbSystems.add(SemanticAttributes.DbSystemValues.OTHER_SQL); dbSystems.add(SemanticAttributes.DbSystemValues.HSQLDB); dbSystems.add(SemanticAttributes.DbSystemValues.H2); SQL_DB_SYSTEMS = Collections.unmodifiableSet(dbSystems); Set<String> standardAttributesPrefix = new HashSet<>(); standardAttributesPrefix.add("http"); standardAttributesPrefix.add("db"); standardAttributesPrefix.add("message"); standardAttributesPrefix.add("messaging"); standardAttributesPrefix.add("rpc"); standardAttributesPrefix.add("enduser"); standardAttributesPrefix.add("net"); standardAttributesPrefix.add("peer"); standardAttributesPrefix.add("exception"); standardAttributesPrefix.add("thread"); standardAttributesPrefix.add("faas"); STANDARD_ATTRIBUTE_PREFIXES = Collections.unmodifiableSet(standardAttributesPrefix); } private final MonitorExporterAsyncClient client; private final String instrumentationKey; /** * Creates an instance of exporter that is configured with given exporter client that sends telemetry events to * Application Insights resource identified by the instrumentation key. * * @param client The client used to send data to Azure Monitor. * @param instrumentationKey The instrumentation key of Application Insights resource. 
*/ AzureMonitorTraceExporter(MonitorExporterAsyncClient client, String instrumentationKey) { this.client = client; this.instrumentationKey = instrumentationKey; } /** * {@inheritDoc} */ @Override public CompletableResultCode export(Collection<SpanData> spans) { CompletableResultCode completableResultCode = new CompletableResultCode(); try { List<TelemetryItem> telemetryItems = new ArrayList<>(); for (SpanData span : spans) { LOGGER.verbose("exporting span: {}", span); export(span, telemetryItems); } client.export(telemetryItems) .subscriberContext(Context.of(Tracer.DISABLE_TRACING_KEY, true)) .subscribe(ignored -> { }, error -> completableResultCode.fail(), completableResultCode::succeed); return completableResultCode; } catch (Throwable t) { LOGGER.error(t.getMessage(), t); return completableResultCode.fail(); } } /** * {@inheritDoc} */ @Override public CompletableResultCode flush() { return CompletableResultCode.ofSuccess(); } /** * {@inheritDoc} */ @Override public CompletableResultCode shutdown() { return CompletableResultCode.ofSuccess(); }
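The `getTargetForHttpClientSpan` logic in the record above normalizes dependency targets by dropping the scheme's default port. A standalone sketch of that rule under illustrative names (`HttpTargets` is not part of the SDK):

```java
final class HttpTargets {
    // Mirrors getTargetForHttpClientSpan: drop ":80" for http and ":443" for
    // https so "example.com:443" and "example.com" report the same target.
    static String stripDefaultPort(String hostAndPort, String scheme) {
        if ("http".equals(scheme) && hostAndPort.endsWith(":80")) {
            return hostAndPort.substring(0, hostAndPort.length() - 3);
        }
        if ("https".equals(scheme) && hostAndPort.endsWith(":443")) {
            return hostAndPort.substring(0, hostAndPort.length() - 4);
        }
        return hostAndPort;
    }

    public static void main(String[] args) {
        System.out.println(stripDefaultPort("example.com:443", "https"));  // example.com
        System.out.println(stripDefaultPort("example.com:8080", "https")); // example.com:8080
    }
}
```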
Use `UncheckedIOException` instead.
public ResourceDeleteCancelEventData setAuthorization(String authorization) { try { setResourceAuthorization( defaultSerializerAdapter.deserialize(authorization, ResourceAuthorization.class, SerializerEncoding.JSON)); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } return this; }
throw LOGGER.logExceptionAsError(new RuntimeException(e));
public ResourceDeleteCancelEventData setAuthorization(String authorization) { try { setResourceAuthorization( DEFAULT_SERIALIZER_ADAPTER.deserialize(authorization, ResourceAuthorization.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } return this; }
class ResourceDeleteCancelEventData { static final ClientLogger LOGGER = new ClientLogger(ResourceDeleteCancelEventData.class); final SerializerAdapter defaultSerializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); /* * The tenant ID of the resource. */ @JsonProperty(value = "tenantId") private String tenantId; /* * The subscription ID of the resource. */ @JsonProperty(value = "subscriptionId") private String subscriptionId; /* * The resource group of the resource. */ @JsonProperty(value = "resourceGroup") private String resourceGroup; /* * The resource provider performing the operation. */ @JsonProperty(value = "resourceProvider") private String resourceProvider; /* * The URI of the resource in the operation. */ @JsonProperty(value = "resourceUri") private String resourceUri; /* * The operation that was performed. */ @JsonProperty(value = "operationName") private String operationName; /* * The status of the operation. */ @JsonProperty(value = "status") private String status; private String authorizationString; /* * The requested authorization for the operation. */ @JsonProperty(value = "authorization") private ResourceAuthorization authorization; private String claimsString; /* * The properties of the claims. */ @JsonProperty(value = "claims") private Map<String, String> claims; /* * An operation ID used for troubleshooting. */ @JsonProperty(value = "correlationId") private String correlationId; private String httpRequestString; /* * The details of the operation. */ @JsonProperty(value = "httpRequest") private ResourceHttpRequest httpRequest; /** * Get the tenantId property: The tenant ID of the resource. * * @return the tenantId value. */ public String getTenantId() { return this.tenantId; } /** * Set the tenantId property: The tenant ID of the resource. * * @param tenantId the tenantId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setTenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Get the subscriptionId property: The subscription ID of the resource. * * @return the subscriptionId value. */ public String getSubscriptionId() { return this.subscriptionId; } /** * Set the subscriptionId property: The subscription ID of the resource. * * @param subscriptionId the subscriptionId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setSubscriptionId(String subscriptionId) { this.subscriptionId = subscriptionId; return this; } /** * Get the resourceGroup property: The resource group of the resource. * * @return the resourceGroup value. */ public String getResourceGroup() { return this.resourceGroup; } /** * Set the resourceGroup property: The resource group of the resource. * * @param resourceGroup the resourceGroup value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceGroup(String resourceGroup) { this.resourceGroup = resourceGroup; return this; } /** * Get the resourceProvider property: The resource provider performing the operation. * * @return the resourceProvider value. */ public String getResourceProvider() { return this.resourceProvider; } /** * Set the resourceProvider property: The resource provider performing the operation. * * @param resourceProvider the resourceProvider value to set. * @return the ResourceDeleteCancelEventData object itself. 
*/ public ResourceDeleteCancelEventData setResourceProvider(String resourceProvider) { this.resourceProvider = resourceProvider; return this; } /** * Get the resourceUri property: The URI of the resource in the operation. * * @return the resourceUri value. */ public String getResourceUri() { return this.resourceUri; } /** * Set the resourceUri property: The URI of the resource in the operation. * * @param resourceUri the resourceUri value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceUri(String resourceUri) { this.resourceUri = resourceUri; return this; } /** * Get the operationName property: The operation that was performed. * * @return the operationName value. */ public String getOperationName() { return this.operationName; } /** * Set the operationName property: The operation that was performed. * * @param operationName the operationName value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setOperationName(String operationName) { this.operationName = operationName; return this; } /** * Get the status property: The status of the operation. * * @return the status value. */ public String getStatus() { return this.status; } /** * Set the status property: The status of the operation. * * @param status the status value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setStatus(String status) { this.status = status; return this; } /** * Get the authorization property: The requested authorization for the operation. * * @return the authorization value. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getAuthorization() { final ResourceAuthorization resourceAuthorization = getResourceAuthorization(); try { return defaultSerializerAdapter.serialize(resourceAuthorization, SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } /** * Set the authorization property: The requested authorization for the operation. * * @param authorization the authorization value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated /** * Get the authorization property: The requested authorization for the operation. * * @return the authorization value. */ public ResourceAuthorization getResourceAuthorization() { return this.authorization; } /** * Set the authorization property: The requested authorization for the operation. * * @param authorization the authorization value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceAuthorization(ResourceAuthorization authorization) { this.authorization = authorization; return this; } /** * Get the claims property: The properties of the claims. * * @return the claims value. * @deprecated This method is no longer supported since v4.9.0. 
* <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getClaims() { final Map<String, String> resourceClaims = getResourceClaims(); if (!resourceClaims.isEmpty()) { try { return defaultSerializerAdapter.serialize(resourceClaims, SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } return null; } /** * Set the claims property: The properties of the claims. * * @param claims the claims value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setClaims(String claims) { try { setResourceClaims(defaultSerializerAdapter.deserialize(claims, Map.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } return this; } /** * Get the claims property: The properties of the claims. * * @return the claims value. */ public Map<String, String> getResourceClaims() { return this.claims; } /** * Set the claims property: The properties of the claims. * * @param claims the claims value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceClaims(Map<String, String> claims) { this.claims = claims; return this; } /** * Get the correlationId property: An operation ID used for troubleshooting. * * @return the correlationId value. */ public String getCorrelationId() { return this.correlationId; } /** * Set the correlationId property: An operation ID used for troubleshooting. * * @param correlationId the correlationId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setCorrelationId(String correlationId) { this.correlationId = correlationId; return this; } /** * Get the httpRequest property: The details of the operation. * * @return the httpRequest value. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getHttpRequest() { ResourceHttpRequest resourceHttpRequest = getResourceHttpRequest(); try { return defaultSerializerAdapter.serialize(resourceHttpRequest, SerializerEncoding.JSON); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Set the httpRequest property: The details of the operation. * * @param httpRequest the httpRequest value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setHttpRequest(String httpRequest) { try { setResourceHttpRequest( defaultSerializerAdapter.deserialize(httpRequest, ResourceHttpRequest.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } return this; } /** * Get the httpRequest property: The details of the operation. * * @return the httpRequest value. */ public ResourceHttpRequest getResourceHttpRequest() { return this.httpRequest; } /** * Set the httpRequest property: The details of the operation. * * @param httpRequest the httpRequest value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceHttpRequest(ResourceHttpRequest httpRequest) { this.httpRequest = httpRequest; return this; } }
class ResourceDeleteCancelEventData { private static final ClientLogger LOGGER = new ClientLogger(ResourceDeleteCancelEventData.class); private static final SerializerAdapter DEFAULT_SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); /* * The tenant ID of the resource. */ @JsonProperty(value = "tenantId") private String tenantId; /* * The subscription ID of the resource. */ @JsonProperty(value = "subscriptionId") private String subscriptionId; /* * The resource group of the resource. */ @JsonProperty(value = "resourceGroup") private String resourceGroup; /* * The resource provider performing the operation. */ @JsonProperty(value = "resourceProvider") private String resourceProvider; /* * The URI of the resource in the operation. */ @JsonProperty(value = "resourceUri") private String resourceUri; /* * The operation that was performed. */ @JsonProperty(value = "operationName") private String operationName; /* * The status of the operation. */ @JsonProperty(value = "status") private String status; /* * The requested authorization for the operation. */ @JsonProperty(value = "authorization") private ResourceAuthorization authorization; /* * The properties of the claims. */ @JsonProperty(value = "claims") private Map<String, String> claims; /* * An operation ID used for troubleshooting. */ @JsonProperty(value = "correlationId") private String correlationId; /* * The details of the operation. */ @JsonProperty(value = "httpRequest") private ResourceHttpRequest httpRequest; /** * Get the tenantId property: The tenant ID of the resource. * * @return the tenantId value. */ public String getTenantId() { return this.tenantId; } /** * Set the tenantId property: The tenant ID of the resource. * * @param tenantId the tenantId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setTenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Get the subscriptionId property: The subscription ID of the resource. * * @return the subscriptionId value. */ public String getSubscriptionId() { return this.subscriptionId; } /** * Set the subscriptionId property: The subscription ID of the resource. * * @param subscriptionId the subscriptionId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setSubscriptionId(String subscriptionId) { this.subscriptionId = subscriptionId; return this; } /** * Get the resourceGroup property: The resource group of the resource. * * @return the resourceGroup value. */ public String getResourceGroup() { return this.resourceGroup; } /** * Set the resourceGroup property: The resource group of the resource. * * @param resourceGroup the resourceGroup value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceGroup(String resourceGroup) { this.resourceGroup = resourceGroup; return this; } /** * Get the resourceProvider property: The resource provider performing the operation. * * @return the resourceProvider value. */ public String getResourceProvider() { return this.resourceProvider; } /** * Set the resourceProvider property: The resource provider performing the operation. * * @param resourceProvider the resourceProvider value to set. * @return the ResourceDeleteCancelEventData object itself. 
*/ public ResourceDeleteCancelEventData setResourceProvider(String resourceProvider) { this.resourceProvider = resourceProvider; return this; } /** * Get the resourceUri property: The URI of the resource in the operation. * * @return the resourceUri value. */ public String getResourceUri() { return this.resourceUri; } /** * Set the resourceUri property: The URI of the resource in the operation. * * @param resourceUri the resourceUri value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceUri(String resourceUri) { this.resourceUri = resourceUri; return this; } /** * Get the operationName property: The operation that was performed. * * @return the operationName value. */ public String getOperationName() { return this.operationName; } /** * Set the operationName property: The operation that was performed. * * @param operationName the operationName value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setOperationName(String operationName) { this.operationName = operationName; return this; } /** * Get the status property: The status of the operation. * * @return the status value. */ public String getStatus() { return this.status; } /** * Set the status property: The status of the operation. * * @param status the status value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setStatus(String status) { this.status = status; return this; } /** * Get the authorization property: The requested authorization for the operation. * * @return the authorization value. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getAuthorization() { final ResourceAuthorization resourceAuthorization = getResourceAuthorization(); try { return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceAuthorization, SerializerEncoding.JSON); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Set the authorization property: The requested authorization for the operation. * * @param authorization the authorization value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated /** * Get the authorization property: The requested authorization for the operation. * * @return the authorization value. */ public ResourceAuthorization getResourceAuthorization() { return this.authorization; } /** * Set the authorization property: The requested authorization for the operation. * * @param authorization the authorization value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceAuthorization(ResourceAuthorization authorization) { this.authorization = authorization; return this; } /** * Get the claims property: The properties of the claims. * * @return the claims value. * @deprecated This method is no longer supported since v4.9.0. 
* <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getClaims() { final Map<String, String> resourceClaims = getResourceClaims(); if (!resourceClaims.isEmpty()) { try { return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceClaims, SerializerEncoding.JSON); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } return null; } /** * Set the claims property: The properties of the claims. * * @param claims the claims value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setClaims(String claims) { try { setResourceClaims(DEFAULT_SERIALIZER_ADAPTER.deserialize(claims, Map.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } return this; } /** * Get the claims property: The properties of the claims. * * @return the claims value. */ public Map<String, String> getResourceClaims() { return this.claims; } /** * Set the claims property: The properties of the claims. * * @param claims the claims value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceClaims(Map<String, String> claims) { this.claims = claims; return this; } /** * Get the correlationId property: An operation ID used for troubleshooting. * * @return the correlationId value. */ public String getCorrelationId() { return this.correlationId; } /** * Set the correlationId property: An operation ID used for troubleshooting. * * @param correlationId the correlationId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setCorrelationId(String correlationId) { this.correlationId = correlationId; return this; } /** * Get the httpRequest property: The details of the operation. * * @return the httpRequest value. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getHttpRequest() { ResourceHttpRequest resourceHttpRequest = getResourceHttpRequest(); try { return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceHttpRequest, SerializerEncoding.JSON); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Set the httpRequest property: The details of the operation. * * @param httpRequest the httpRequest value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setHttpRequest(String httpRequest) { try { setResourceHttpRequest( DEFAULT_SERIALIZER_ADAPTER.deserialize(httpRequest, ResourceHttpRequest.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } return this; } /** * Get the httpRequest property: The details of the operation. * * @return the httpRequest value. */ public ResourceHttpRequest getResourceHttpRequest() { return this.httpRequest; } /** * Set the httpRequest property: The details of the operation. * * @param httpRequest the httpRequest value to set. * @return the ResourceDeleteCancelEventData object itself. 
*/ public ResourceDeleteCancelEventData setResourceHttpRequest(ResourceHttpRequest httpRequest) { this.httpRequest = httpRequest; return this; } }
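The fix in this record swaps the bare `RuntimeException` for `UncheckedIOException`, so the cause stays typed as an I/O failure and callers can still catch it specifically. A minimal sketch of the pattern, assuming Jackson's `ObjectMapper` in place of the SDK's `SerializerAdapter` (the class and method names here are illustrative):

```java
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

final class ClaimsParser {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // UncheckedIOException keeps the IOException as the typed cause, so a
    // caller can catch (UncheckedIOException e) instead of a generic
    // RuntimeException and still reach e.getCause() as an IOException.
    static Map<String, String> parse(String json) {
        try {
            return MAPPER.readValue(json, new TypeReference<Map<String, String>>() { });
        } catch (IOException ex) {
            throw new UncheckedIOException(ex);
        }
    }
}
```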
Add a unit test for this, as we should make sure the map is getting serialized and deserialized correctly.
public String getClaims() { final Map<String, String> resourceClaims = getResourceClaims(); if (!resourceClaims.isEmpty()) { try { return defaultSerializerAdapter.serialize(resourceClaims, SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } return null; }
return defaultSerializerAdapter.serialize(resourceClaims, SerializerEncoding.JSON);
public String getClaims() { final Map<String, String> resourceClaims = getResourceClaims(); if (!resourceClaims.isEmpty()) { try { return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceClaims, SerializerEncoding.JSON); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } return null; }
class ResourceDeleteCancelEventData { static final ClientLogger LOGGER = new ClientLogger(ResourceDeleteCancelEventData.class); final SerializerAdapter defaultSerializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); /* * The tenant ID of the resource. */ @JsonProperty(value = "tenantId") private String tenantId; /* * The subscription ID of the resource. */ @JsonProperty(value = "subscriptionId") private String subscriptionId; /* * The resource group of the resource. */ @JsonProperty(value = "resourceGroup") private String resourceGroup; /* * The resource provider performing the operation. */ @JsonProperty(value = "resourceProvider") private String resourceProvider; /* * The URI of the resource in the operation. */ @JsonProperty(value = "resourceUri") private String resourceUri; /* * The operation that was performed. */ @JsonProperty(value = "operationName") private String operationName; /* * The status of the operation. */ @JsonProperty(value = "status") private String status; private String authorizationString; /* * The requested authorization for the operation. */ @JsonProperty(value = "authorization") private ResourceAuthorization authorization; private String claimsString; /* * The properties of the claims. */ @JsonProperty(value = "claims") private Map<String, String> claims; /* * An operation ID used for troubleshooting. */ @JsonProperty(value = "correlationId") private String correlationId; private String httpRequestString; /* * The details of the operation. */ @JsonProperty(value = "httpRequest") private ResourceHttpRequest httpRequest; /** * Get the tenantId property: The tenant ID of the resource. * * @return the tenantId value. */ public String getTenantId() { return this.tenantId; } /** * Set the tenantId property: The tenant ID of the resource. * * @param tenantId the tenantId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setTenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Get the subscriptionId property: The subscription ID of the resource. * * @return the subscriptionId value. */ public String getSubscriptionId() { return this.subscriptionId; } /** * Set the subscriptionId property: The subscription ID of the resource. * * @param subscriptionId the subscriptionId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setSubscriptionId(String subscriptionId) { this.subscriptionId = subscriptionId; return this; } /** * Get the resourceGroup property: The resource group of the resource. * * @return the resourceGroup value. */ public String getResourceGroup() { return this.resourceGroup; } /** * Set the resourceGroup property: The resource group of the resource. * * @param resourceGroup the resourceGroup value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceGroup(String resourceGroup) { this.resourceGroup = resourceGroup; return this; } /** * Get the resourceProvider property: The resource provider performing the operation. * * @return the resourceProvider value. */ public String getResourceProvider() { return this.resourceProvider; } /** * Set the resourceProvider property: The resource provider performing the operation. * * @param resourceProvider the resourceProvider value to set. * @return the ResourceDeleteCancelEventData object itself. 
*/ public ResourceDeleteCancelEventData setResourceProvider(String resourceProvider) { this.resourceProvider = resourceProvider; return this; } /** * Get the resourceUri property: The URI of the resource in the operation. * * @return the resourceUri value. */ public String getResourceUri() { return this.resourceUri; } /** * Set the resourceUri property: The URI of the resource in the operation. * * @param resourceUri the resourceUri value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceUri(String resourceUri) { this.resourceUri = resourceUri; return this; } /** * Get the operationName property: The operation that was performed. * * @return the operationName value. */ public String getOperationName() { return this.operationName; } /** * Set the operationName property: The operation that was performed. * * @param operationName the operationName value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setOperationName(String operationName) { this.operationName = operationName; return this; } /** * Get the status property: The status of the operation. * * @return the status value. */ public String getStatus() { return this.status; } /** * Set the status property: The status of the operation. * * @param status the status value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setStatus(String status) { this.status = status; return this; } /** * Get the authorization property: The requested authorization for the operation. * * @return the authorization value. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getAuthorization() { final ResourceAuthorization resourceAuthorization = getResourceAuthorization(); try { return defaultSerializerAdapter.serialize(resourceAuthorization, SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } /** * Set the authorization property: The requested authorization for the operation. * * @param authorization the authorization value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setAuthorization(String authorization) { try { setResourceAuthorization( defaultSerializerAdapter.deserialize(authorization, ResourceAuthorization.class, SerializerEncoding.JSON)); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } return this; } /** * Get the authorization property: The requested authorization for the operation. * * @return the authorization value. */ public ResourceAuthorization getResourceAuthorization() { return this.authorization; } /** * Set the authorization property: The requested authorization for the operation. * * @param authorization the authorization value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceAuthorization(ResourceAuthorization authorization) { this.authorization = authorization; return this; } /** * Get the claims property: The properties of the claims. * * @return the claims value. * @deprecated This method is no longer supported since v4.9.0. 
* <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated /** * Set the claims property: The properties of the claims. * * @param claims the claims value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setClaims(String claims) { try { setResourceClaims(defaultSerializerAdapter.deserialize(claims, Map.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } return this; } /** * Get the claims property: The properties of the claims. * * @return the claims value. */ public Map<String, String> getResourceClaims() { return this.claims; } /** * Set the claims property: The properties of the claims. * * @param claims the claims value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceClaims(Map<String, String> claims) { this.claims = claims; return this; } /** * Get the correlationId property: An operation ID used for troubleshooting. * * @return the correlationId value. */ public String getCorrelationId() { return this.correlationId; } /** * Set the correlationId property: An operation ID used for troubleshooting. * * @param correlationId the correlationId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setCorrelationId(String correlationId) { this.correlationId = correlationId; return this; } /** * Get the httpRequest property: The details of the operation. * * @return the httpRequest value. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getHttpRequest() { ResourceHttpRequest resourceHttpRequest = getResourceHttpRequest(); try { return defaultSerializerAdapter.serialize(resourceHttpRequest, SerializerEncoding.JSON); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Set the httpRequest property: The details of the operation. * * @param httpRequest the httpRequest value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setHttpRequest(String httpRequest) { try { setResourceHttpRequest( defaultSerializerAdapter.deserialize(httpRequest, ResourceHttpRequest.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } return this; } /** * Get the httpRequest property: The details of the operation. * * @return the httpRequest value. */ public ResourceHttpRequest getResourceHttpRequest() { return this.httpRequest; } /** * Set the httpRequest property: The details of the operation. * * @param httpRequest the httpRequest value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceHttpRequest(ResourceHttpRequest httpRequest) { this.httpRequest = httpRequest; return this; } }
class ResourceDeleteCancelEventData { private static final ClientLogger LOGGER = new ClientLogger(ResourceDeleteCancelEventData.class); private static final SerializerAdapter DEFAULT_SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); /* * The tenant ID of the resource. */ @JsonProperty(value = "tenantId") private String tenantId; /* * The subscription ID of the resource. */ @JsonProperty(value = "subscriptionId") private String subscriptionId; /* * The resource group of the resource. */ @JsonProperty(value = "resourceGroup") private String resourceGroup; /* * The resource provider performing the operation. */ @JsonProperty(value = "resourceProvider") private String resourceProvider; /* * The URI of the resource in the operation. */ @JsonProperty(value = "resourceUri") private String resourceUri; /* * The operation that was performed. */ @JsonProperty(value = "operationName") private String operationName; /* * The status of the operation. */ @JsonProperty(value = "status") private String status; /* * The requested authorization for the operation. */ @JsonProperty(value = "authorization") private ResourceAuthorization authorization; /* * The properties of the claims. */ @JsonProperty(value = "claims") private Map<String, String> claims; /* * An operation ID used for troubleshooting. */ @JsonProperty(value = "correlationId") private String correlationId; /* * The details of the operation. */ @JsonProperty(value = "httpRequest") private ResourceHttpRequest httpRequest; /** * Get the tenantId property: The tenant ID of the resource. * * @return the tenantId value. */ public String getTenantId() { return this.tenantId; } /** * Set the tenantId property: The tenant ID of the resource. * * @param tenantId the tenantId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setTenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Get the subscriptionId property: The subscription ID of the resource. * * @return the subscriptionId value. */ public String getSubscriptionId() { return this.subscriptionId; } /** * Set the subscriptionId property: The subscription ID of the resource. * * @param subscriptionId the subscriptionId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setSubscriptionId(String subscriptionId) { this.subscriptionId = subscriptionId; return this; } /** * Get the resourceGroup property: The resource group of the resource. * * @return the resourceGroup value. */ public String getResourceGroup() { return this.resourceGroup; } /** * Set the resourceGroup property: The resource group of the resource. * * @param resourceGroup the resourceGroup value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceGroup(String resourceGroup) { this.resourceGroup = resourceGroup; return this; } /** * Get the resourceProvider property: The resource provider performing the operation. * * @return the resourceProvider value. */ public String getResourceProvider() { return this.resourceProvider; } /** * Set the resourceProvider property: The resource provider performing the operation. * * @param resourceProvider the resourceProvider value to set. * @return the ResourceDeleteCancelEventData object itself. 
*/ public ResourceDeleteCancelEventData setResourceProvider(String resourceProvider) { this.resourceProvider = resourceProvider; return this; } /** * Get the resourceUri property: The URI of the resource in the operation. * * @return the resourceUri value. */ public String getResourceUri() { return this.resourceUri; } /** * Set the resourceUri property: The URI of the resource in the operation. * * @param resourceUri the resourceUri value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceUri(String resourceUri) { this.resourceUri = resourceUri; return this; } /** * Get the operationName property: The operation that was performed. * * @return the operationName value. */ public String getOperationName() { return this.operationName; } /** * Set the operationName property: The operation that was performed. * * @param operationName the operationName value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setOperationName(String operationName) { this.operationName = operationName; return this; } /** * Get the status property: The status of the operation. * * @return the status value. */ public String getStatus() { return this.status; } /** * Set the status property: The status of the operation. * * @param status the status value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setStatus(String status) { this.status = status; return this; } /** * Get the authorization property: The requested authorization for the operation. * * @return the authorization value. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getAuthorization() { final ResourceAuthorization resourceAuthorization = getResourceAuthorization(); try { return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceAuthorization, SerializerEncoding.JSON); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Set the authorization property: The requested authorization for the operation. * * @param authorization the authorization value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setAuthorization(String authorization) { try { setResourceAuthorization( DEFAULT_SERIALIZER_ADAPTER.deserialize(authorization, ResourceAuthorization.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } return this; } /** * Get the authorization property: The requested authorization for the operation. * * @return the authorization value. */ public ResourceAuthorization getResourceAuthorization() { return this.authorization; } /** * Set the authorization property: The requested authorization for the operation. * * @param authorization the authorization value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceAuthorization(ResourceAuthorization authorization) { this.authorization = authorization; return this; } /** * Get the claims property: The properties of the claims. * * @return the claims value. * @deprecated This method is no longer supported since v4.9.0. 
* <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated /** * Set the claims property: The properties of the claims. * * @param claims the claims value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setClaims(String claims) { try { setResourceClaims(DEFAULT_SERIALIZER_ADAPTER.deserialize(claims, Map.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } return this; } /** * Get the claims property: The properties of the claims. * * @return the claims value. */ public Map<String, String> getResourceClaims() { return this.claims; } /** * Set the claims property: The properties of the claims. * * @param claims the claims value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceClaims(Map<String, String> claims) { this.claims = claims; return this; } /** * Get the correlationId property: An operation ID used for troubleshooting. * * @return the correlationId value. */ public String getCorrelationId() { return this.correlationId; } /** * Set the correlationId property: An operation ID used for troubleshooting. * * @param correlationId the correlationId value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setCorrelationId(String correlationId) { this.correlationId = correlationId; return this; } /** * Get the httpRequest property: The details of the operation. * * @return the httpRequest value. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public String getHttpRequest() { ResourceHttpRequest resourceHttpRequest = getResourceHttpRequest(); try { return DEFAULT_SERIALIZER_ADAPTER.serialize(resourceHttpRequest, SerializerEncoding.JSON); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Set the httpRequest property: The details of the operation. * * @param httpRequest the httpRequest value to set. * @return the ResourceDeleteCancelEventData object itself. * @deprecated This method is no longer supported since v4.9.0. * <p> Use {@link ResourceDeleteCancelEventData */ @Deprecated public ResourceDeleteCancelEventData setHttpRequest(String httpRequest) { try { setResourceHttpRequest( DEFAULT_SERIALIZER_ADAPTER.deserialize(httpRequest, ResourceHttpRequest.class, SerializerEncoding.JSON)); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } return this; } /** * Get the httpRequest property: The details of the operation. * * @return the httpRequest value. */ public ResourceHttpRequest getResourceHttpRequest() { return this.httpRequest; } /** * Set the httpRequest property: The details of the operation. * * @param httpRequest the httpRequest value to set. * @return the ResourceDeleteCancelEventData object itself. */ public ResourceDeleteCancelEventData setResourceHttpRequest(ResourceHttpRequest httpRequest) { this.httpRequest = httpRequest; return this; } }
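The deprecated string accessors in this model delegate to the typed replacements through the Jackson serializer adapter. A short migration sketch showing both paths; the class name is hypothetical, the model classes above are assumed to be importable, and a public no-arg `ResourceAuthorization` constructor is assumed:

```java
public final class ResourceEventMigrationSketch {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        // Typed accessors (the replacements): no JSON round-trip involved.
        ResourceDeleteCancelEventData eventData = new ResourceDeleteCancelEventData()
            .setResourceAuthorization(new ResourceAuthorization()); // assumes a public no-arg constructor

        ResourceAuthorization authorization = eventData.getResourceAuthorization();
        System.out.println(authorization != null);

        // Deprecated accessor: returns the same data serialized to a JSON string.
        String authorizationJson = eventData.getAuthorization();
        System.out.println(authorizationJson);
    }
}
```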
Instead of capturing and resetting the position, a `readOnly` view can be passed into `MessageDigest`. https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html#asReadOnlyBuffer-- #Resolved
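A minimal sketch of the suggested approach, with a hypothetical `sha256Hex` helper mirroring `computeDigest`: the read-only view carries its own position and limit, so `MessageDigest.update` can consume it without disturbing the caller's buffer, making the capture-and-reset `finally` block unnecessary.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public final class ReadOnlyDigestSketch {
    // Hypothetical helper: hashes the buffer content without moving the caller's position.
    static String sha256Hex(ByteBuffer buffer) throws NoSuchAlgorithmException {
        // The read-only view shares content with the original buffer but has its
        // own position/limit, so advancing it during update() is side-effect free.
        ByteBuffer readOnlyView = buffer.asReadOnlyBuffer();
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(readOnlyView);
        StringBuilder sb = new StringBuilder();
        for (byte b : md.digest()) {
            sb.append(String.format("%02x", b));
        }
        return "sha256:" + sb;
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        ByteBuffer data = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
        System.out.println(sha256Hex(data));
        // The original buffer is still fully readable afterwards.
        System.out.println("remaining: " + data.remaining()); // prints 5
    }
}
```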
public static String byteArrayToHex(byte[] a) { StringBuilder sb = new StringBuilder(a.length * 2); for (byte b: a) { sb.append(String.format("%02x", b)); } return sb.toString(); }
for (byte b: a) {
public static String byteArrayToHex(byte[] a) { StringBuilder sb = new StringBuilder(a.length * 2); for (byte b: a) { sb.append(String.format("%02x", b)); } return sb.toString(); }
class UtilsImpl { private static final String CLIENT_NAME; private static final String CLIENT_VERSION; private static final int HTTP_STATUS_CODE_NOT_FOUND = 404; private static final int HTTP_STATUS_CODE_ACCEPTED = 202; public static final String OCI_MANIFEST_MEDIA_TYPE; public static final String DOCKER_DIGEST_HEADER_NAME; static { Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties"); CLIENT_NAME = properties.getOrDefault("name", "UnknownName"); CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion"); OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json"; DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest"; } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param credential credentials. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. * @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. */ public static HttpPipeline buildHttpPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, ClientLogger logger) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions); if (credential == null) { logger.verbose("Credentials are null, enabling anonymous access"); } ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies); credentialPolicies.add(loggingPolicy); ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService( credential, audience, endpoint, serviceVersion, new HttpPipelineBuilder() .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(), JacksonAdapter.createDefaultSerializerAdapter()); ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService); policies.add(credentialsPolicy); policies.add(loggingPolicy); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) { ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>(); for (HttpPipelinePolicy policy:policies) { clonedPolicy.add(policy); } return clonedPolicy; } /** * This method computes the digest for the buffer content. 
* Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. */ public static String computeDigest(ByteBuffer buffer) { int startPosition = buffer.position(); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(buffer); byte[] digest = md.digest(); return "sha256:" + byteArrayToHex(digest); } catch (NoSuchAlgorithmException e) { } finally { buffer.position(startPosition); } return null; } /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 404: Not Found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. */ public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return Mono.just(new ResponseBase<String, Void>( responseT.getRequest(), statusCode, responseT.getHeaders(), null, null)); } /** * This method converts the API response codes into well known exceptions. * @param exception The exception returned by the rest client. * @return The exception returned by the public methods. */ public static Throwable mapException(Throwable exception) { AcrErrorsException acrException = null; if (exception instanceof AcrErrorsException) { acrException = ((AcrErrorsException) exception); } else if (exception instanceof RuntimeException) { RuntimeException runtimeException = (RuntimeException) exception; Throwable throwable = runtimeException.getCause(); if (throwable instanceof AcrErrorsException) { acrException = (AcrErrorsException) throwable; } } if (acrException == null) { return exception; } final HttpResponse errorHttpResponse = acrException.getResponse(); final int statusCode = errorHttpResponse.getStatusCode(); final String errorDetail = acrException.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, acrException.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception); default: return new HttpResponseException(errorDetail, acrException.getResponse(), exception); } } }
class UtilsImpl { private static final String CLIENT_NAME; private static final String CLIENT_VERSION; private static final int HTTP_STATUS_CODE_NOT_FOUND; private static final int HTTP_STATUS_CODE_ACCEPTED; private static final String CONTINUATION_LINK_HEADER_NAME; private static final Pattern CONTINUATION_LINK_PATTERN; public static final String OCI_MANIFEST_MEDIA_TYPE; public static final String DOCKER_DIGEST_HEADER_NAME; public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE; static { Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties"); CLIENT_NAME = properties.getOrDefault("name", "UnknownName"); CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion"); HTTP_STATUS_CODE_NOT_FOUND = 404; HTTP_STATUS_CODE_ACCEPTED = 202; OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json"; DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest"; CONTINUATION_LINK_HEADER_NAME = "Link"; CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*"); CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; } private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. * @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. */ public static HttpPipeline buildHttpPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, ClientLogger logger) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions); if (credential == null) { logger.verbose("Credentials are null, enabling anonymous access"); } ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies); credentialPolicies.add(loggingPolicy); if (audience == null) { audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD; } ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService( credential, audience, endpoint, serviceVersion, new HttpPipelineBuilder() .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(), JacksonAdapter.createDefaultSerializerAdapter()); ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService); 
policies.add(credentialsPolicy); policies.add(loggingPolicy); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) { ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>(); for (HttpPipelinePolicy policy:policies) { clonedPolicy.add(policy); } return clonedPolicy; } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. */ public static String computeDigest(ByteBuffer buffer) { ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer(); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(readOnlyBuffer); byte[] digest = md.digest(); return "sha256:" + byteArrayToHex(digest); } catch (NoSuchAlgorithmException e) { } return null; } /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 404: Not Found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. */ public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return Mono.just(new ResponseBase<String, Void>( responseT.getRequest(), statusCode, responseT.getHeaders(), null, null)); } /** * This method converts the API response codes into well known exceptions. * @param exception The exception returned by the rest client. * @return The exception returned by the public methods. */ public static Throwable mapException(Throwable exception) { AcrErrorsException acrException = null; if (exception instanceof AcrErrorsException) { acrException = ((AcrErrorsException) exception); } else if (exception instanceof RuntimeException) { RuntimeException runtimeException = (RuntimeException) exception; Throwable throwable = runtimeException.getCause(); if (throwable instanceof AcrErrorsException) { acrException = (AcrErrorsException) throwable; } } if (acrException == null) { return exception; } final HttpResponse errorHttpResponse = acrException.getResponse(); final int statusCode = errorHttpResponse.getStatusCode(); final String errorDetail = acrException.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, acrException.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception); default: return new HttpResponseException(errorDetail, acrException.getResponse(), exception); } } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed.
* @param <T> the model type that is being operated on. * @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the public client. * @param <R> The model type returned by the rest client. * @return paged response with the correct continuation token. */ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = null; HttpHeaders headers = listResponse.getHeaders(); if (headers != null) { String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) { Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader); if (matcher.matches()) { if (matcher.groupCount() == 1) { continuationLink = matcher.group(1); } } } } List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } }
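The continuation-link parsing above can be exercised standalone. This sketch reuses the same `<(.+)>;.*` pattern on an illustrative hard-coded `Link` header value to pull out the next-page URI:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ContinuationLinkSketch {
    private static final Pattern CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*");

    static String parseNextLink(String linkHeader) {
        if (linkHeader == null || linkHeader.isEmpty()) {
            return null;
        }
        Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(linkHeader);
        // matches() requires the whole header to fit the pattern; group 1 is the URI.
        return matcher.matches() ? matcher.group(1) : null;
    }

    public static void main(String[] args) {
        // Illustrative RFC 5988 style rel="next" link header.
        String header = "</acr/v1/_catalog?last=hello-world&n=10>; rel=\"next\"";
        System.out.println(parseNextLink(header)); // /acr/v1/_catalog?last=hello-world&n=10
    }
}
```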
Did the service remove the API version? #Resolved
public TokenServiceImpl(String url, ContainerRegistryServiceVersion apiVersion, HttpPipeline pipeline, SerializerAdapter serializerAdapter) { if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } this.authenticationsImpl = new AuthenticationsImpl(url, pipeline, serializerAdapter); }
public TokenServiceImpl(String url, ContainerRegistryServiceVersion apiVersion, HttpPipeline pipeline, SerializerAdapter serializerAdapter) { if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } if (apiVersion == null) { apiVersion = ContainerRegistryServiceVersion.getLatest(); } this.authenticationsImpl = new AuthenticationsImpl(url, apiVersion.getVersion(), pipeline, serializerAdapter); }
class TokenServiceImpl { private final AuthenticationsImpl authenticationsImpl; private static final String REFRESHTOKEN_GRANTTYPE = "refresh_token"; private static final String ACCESSTOKEN_GRANTTYPE = "access_token"; /** * Creates an instance of the TokenServiceImpl class. * @param url the service endpoint. * @param apiVersion the api-version of the service being targeted. * @param pipeline the pipeline to use to make the call. * @param serializerAdapter the serializer adapter for the rest client. * */ /** * Gets the ACR access token. * @param acrRefreshToken Given the ACR refresh token. * @param scope - Token scope. * @param serviceName The name of the service. * */ public Mono<AccessToken> getAcrAccessTokenAsync(String acrRefreshToken, String scope, String serviceName, String grantType) { return this.authenticationsImpl.exchangeAcrRefreshTokenForAcrAccessTokenAsync(serviceName, scope, grantType, acrRefreshToken) .map(token -> { String accessToken = token.getAccessToken(); OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(accessToken); return new AccessToken(accessToken, expirationTime); }); } /** * Gets an ACR refresh token. * @param aadAccessToken Given the AAD access token. * @param serviceName Given the ACR service. * */ public Mono<AccessToken> getAcrRefreshTokenAsync(String aadAccessToken, String serviceName) { return this.authenticationsImpl.exchangeAadAccessTokenForAcrRefreshTokenAsync( serviceName, aadAccessToken).map(token -> { String refreshToken = token.getRefreshToken(); OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(refreshToken); return new AccessToken(refreshToken, expirationTime); }); } }
class TokenServiceImpl { private final AuthenticationsImpl authenticationsImpl; private static final String REFRESHTOKEN_GRANTTYPE = "refresh_token"; private static final String ACCESSTOKEN_GRANTTYPE = "access_token"; /** * Creates an instance of the TokenServiceImpl class. * * @param url the service endpoint. * @param apiVersion the api-version of the service being targeted. * @param pipeline the pipeline to use to make the call. * @param serializerAdapter the serializer adapter for the rest client. */ /** * Gets the ACR access token. * * @param acrRefreshToken Given the ACR refresh token. * @param scope - Token scope. * @param serviceName The name of the service. */ public Mono<AccessToken> getAcrAccessTokenAsync(String acrRefreshToken, String scope, String serviceName, TokenGrantType grantType) { return withContext(context -> this.authenticationsImpl.exchangeAcrRefreshTokenForAcrAccessTokenWithResponseAsync(serviceName, scope, acrRefreshToken, grantType, context) .flatMap(response -> { AcrAccessToken token = response.getValue(); if (token != null) { String accessToken = token.getAccessToken(); OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(accessToken); return Mono.just(new AccessToken(accessToken, expirationTime)); } return Mono.empty(); })); } /** * Gets an ACR refresh token. * * @param aadAccessToken Given the AAD access token. * @param serviceName Given the ACR service. */ public Mono<AccessToken> getAcrRefreshTokenAsync(String aadAccessToken, String serviceName) { return withContext(context -> this.authenticationsImpl.exchangeAadAccessTokenForAcrRefreshTokenWithResponseAsync(PostContentSchemaGrantType.ACCESS_TOKEN, serviceName, null, null, aadAccessToken, context).flatMap(response -> { AcrRefreshToken token = response.getValue(); if (token != null) { String refreshToken = token.getRefreshToken(); OffsetDateTime expirationTime = JsonWebToken.retrieveExpiration(refreshToken); return Mono.just(new AccessToken(refreshToken, expirationTime)); } return Mono.empty(); })); } }
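The two exchanges above compose into the full ACR challenge flow: an AAD access token is traded for an ACR refresh token, which is then traded for a scoped ACR access token. A hedged sketch of the chaining, assuming it runs alongside the implementation classes above; the login server and scope values are illustrative, and `TokenGrantType.REFRESH_TOKEN` is assumed to be the constant used for refresh-token exchanges:

```java
import com.azure.core.credential.AccessToken;
import reactor.core.publisher.Mono;

final class AcrTokenFlowSketch {
    // tokenService construction is omitted; it is assumed to be built as in the snippets above.
    static Mono<AccessToken> exchange(TokenServiceImpl tokenService, String aadAccessToken) {
        String serviceName = "myregistry.azurecr.io";  // illustrative login server
        String scope = "repository:hello-world:pull";  // illustrative token scope
        return tokenService.getAcrRefreshTokenAsync(aadAccessToken, serviceName)
            .flatMap(refreshToken -> tokenService.getAcrAccessTokenAsync(
                refreshToken.getToken(), scope, serviceName, TokenGrantType.REFRESH_TOKEN));
    }
}
```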
This should be returned as a `Mono.error`; same for other locations in this file that throw. #Resolved
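For background on this suggestion: a `throw` inside a method returning `Mono` surfaces the exception eagerly at assembly time, whereas `Mono.error` (or azure-core's `monoError`, which additionally logs) defers it to the `onError` signal that reactive callers actually handle. A generic sketch of the difference, not tied to the SDK method:

```java
import reactor.core.publisher.Mono;

public final class MonoErrorSketch {
    // Eager: the exception escapes before anyone subscribes.
    static Mono<String> eager(String input) {
        if (input == null) {
            throw new NullPointerException("'input' can't be null.");
        }
        return Mono.just(input);
    }

    // Deferred: the exception is delivered through the onError signal.
    static Mono<String> deferred(String input) {
        if (input == null) {
            return Mono.error(new NullPointerException("'input' can't be null."));
        }
        return Mono.just(input);
    }

    public static void main(String[] args) {
        deferred(null).subscribe(
            value -> System.out.println("value: " + value),
            error -> System.out.println("onError: " + error)); // handled reactively

        try {
            eager(null); // throws immediately; no Mono is ever returned
        } catch (NullPointerException e) {
            System.out.println("thrown eagerly: " + e.getMessage());
        }
    }
}
```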
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { throw logger.logExceptionAsError(new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(Flux.just(ByteBuffer.wrap(bytes)), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new RuntimeException(exception.getMessage())); } }
throw logger.logExceptionAsError(new NullPointerException("'manifest' can't be null."));
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final HttpPipeline httpPipeline; private final String endpoint; private final String apiVersion; private final String repositoryName; private final String registryLoginServer; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.httpPipeline = httpPipeline; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); this.apiVersion = version; try { URL endpointUrl = new URL(endpoint); this.registryLoginServer = endpointUrl.getHost(); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { throw logger.logExceptionAsError(new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. 
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { throw logger.logExceptionAsError(new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) { if (data == null) { throw logger.logExceptionAsError(new NullPointerException("'data' can't be null.")); } BufferedFlux playableFlux = new BufferedFlux(); return data.concatMap(playableFlux::write) .then(Mono.defer(() -> Mono.just(playableFlux.getDigest()))) .flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync( repositoryName, dig, playableFlux.flush(), playableFlux.getSize(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)) .flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { throw logger.logExceptionAsError(new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(Flux<ByteBuffer> data, Context context) { if (data == null) { throw logger.logExceptionAsError(new NullPointerException("'data' can't be null.")); } BufferedFlux playableFlux = new BufferedFlux(); AtomicReference<String> digest = new AtomicReference<>(); return data.concatMap(playableFlux::write) .then(Mono.defer(() -> Mono.just(playableFlux.getDigest()))) .flatMap(dig -> { digest.set(dig); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context); }) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), playableFlux.flush(), playableFlux.getSize(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest.get(), trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { throw logger.logExceptionAsError(new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { throw logger.logExceptionAsError(new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context) .flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); Response<DownloadBlobResult> blobResult = new ResponseBase<>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), null, new DownloadBlobResult() .setContent(streamResponse.getValue()) .setDigest(resDigest)); return Mono.just(blobResult); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { throw logger.logExceptionAsError(new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } static final class BufferedFlux { int size; ByteBuffer byteBuffer; int getSize() { return this.size; } String getDigest() { return UtilsImpl.computeDigest(byteBuffer); } Flux<Void> write(ByteBuffer buffer) { size += buffer.remaining(); byteBuffer = buffer; return Flux.empty(); } Flux<ByteBuffer> flush() { return Flux.just(byteBuffer); } } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. 
* @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
Consider using `UncheckedIOException` when wrapping `IOException`. #Resolved
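A sketch of the suggested wrapping, with a hypothetical `doSerialize` standing in for the serializer call: `UncheckedIOException` keeps the original `IOException` as a typed cause instead of flattening it into a `RuntimeException` built from just the message, so callers can still recover the original failure.

```java
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;

public final class UncheckedIoSketch {
    static byte[] serialize(Object value) {
        try {
            return doSerialize(value); // stand-in for the serializer call
        } catch (IOException ex) {
            // Preserves the original exception (message and stack trace) as the cause.
            throw new UncheckedIOException(ex);
        }
    }

    // Hypothetical serializer used only to make the sketch compile and run.
    private static byte[] doSerialize(Object value) throws IOException {
        if (value == null) {
            throw new IOException("nothing to serialize");
        }
        return value.toString().getBytes(StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        try {
            serialize(null);
        } catch (UncheckedIOException e) {
            System.out.println("cause: " + e.getCause().getMessage()); // nothing to serialize
        }
    }
}
```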
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(Flux.just(ByteBuffer.wrap(bytes)), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new RuntimeException(exception.getMessage())); } }
return monoError(logger, new RuntimeException(exception.getMessage()));
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } BufferedFlux playableFlux = new BufferedFlux(); return data.concatMap(playableFlux::write) .then(Mono.defer(() -> Mono.just(playableFlux.getDigest()))) .flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync( repositoryName, dig, playableFlux.flush(), playableFlux.getSize(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)) .flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(Flux<ByteBuffer> data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } BufferedFlux playableFlux = new BufferedFlux(); AtomicReference<String> digest = new AtomicReference<>(); return data.concatMap(playableFlux::write) .then(Mono.defer(() -> Mono.just(playableFlux.getDigest()))) .flatMap(dig -> { digest.set(dig); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context); }) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), playableFlux.flush(), playableFlux.getSize(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest.get(), trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context) .flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); Response<DownloadBlobResult> blobResult = new ResponseBase<>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), null, new DownloadBlobResult() .setContent(streamResponse.getValue()) .setDigest(resDigest)); return Mono.just(blobResult); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } static final class BufferedFlux { int size; ByteBuffer byteBuffer; int getSize() { return this.size; } String getDigest() { return UtilsImpl.computeDigest(byteBuffer); } Flux<Void> write(ByteBuffer buffer) { size += buffer.remaining(); byteBuffer = buffer; return Flux.empty(); } Flux<ByteBuffer> flush() { return Flux.just(byteBuffer); } } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. 
* @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
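The refactored context above derives the digest directly from the in-memory `ByteBuffer` via `UtilsImpl.computeDigest(data)` instead of accumulating a `Flux` first. As a hedged illustration only (the SDK's actual helper may differ), an OCI/Docker registry digest is typically the SHA-256 hash of the content, hex-encoded and prefixed with `sha256:`:

```java
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Illustrative sketch of what a helper like UtilsImpl.computeDigest might do;
// the SDK's real implementation may differ.
static String computeDigest(ByteBuffer buffer) {
    try {
        MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
        // Hash a duplicate so the caller's read position is left untouched.
        sha256.update(buffer.duplicate());
        StringBuilder hex = new StringBuilder("sha256:");
        for (byte b : sha256.digest()) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    } catch (NoSuchAlgorithmException e) {
        // Every compliant JVM ships SHA-256, so this is effectively unreachable.
        throw new IllegalStateException(e);
    }
}
```

This is also why `data.remaining()` can be passed as the content length: with a single in-memory `ByteBuffer` the size is known up front, whereas the streaming `Flux<ByteBuffer>` version had to accumulate the buffers before it knew either the digest or the length.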
`audience` should be optional and, if it is not set, we should use the default (public cloud) audience. #Resolved
public ContainerRegistryBlobAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'endpoint' can't be null"); Objects.requireNonNull(audience, "'audience' can't be null"); ContainerRegistryServiceVersion serviceVersion = (version != null) ? version : ContainerRegistryServiceVersion.getLatest(); HttpPipeline pipeline = getHttpPipeline(); ContainerRegistryBlobAsyncClient client = new ContainerRegistryBlobAsyncClient(repositoryName, pipeline, endpoint, serviceVersion.getVersion()); return client; }
Objects.requireNonNull(audience, "'audience' can't be null");
public ContainerRegistryBlobAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'endpoint' can't be null"); ContainerRegistryServiceVersion serviceVersion = (version != null) ? version : ContainerRegistryServiceVersion.getLatest(); HttpPipeline pipeline = getHttpPipeline(); ContainerRegistryBlobAsyncClient client = new ContainerRegistryBlobAsyncClient(repositoryName, pipeline, endpoint, serviceVersion.getVersion()); return client; }
class ContainerRegistryBlobClientBuilder implements ConfigurationTrait<ContainerRegistryBlobClientBuilder>, EndpointTrait<ContainerRegistryBlobClientBuilder>, HttpTrait<ContainerRegistryBlobClientBuilder>, TokenCredentialTrait<ContainerRegistryBlobClientBuilder> { private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private Configuration configuration; private String endpoint; private HttpClient httpClient; private TokenCredential credential; private HttpPipeline httpPipeline; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private RetryOptions retryOptions; private ContainerRegistryServiceVersion version; private ContainerRegistryAudience audience; private String repositoryName; /** * Sets the service endpoint for the Azure Container Registry instance. * * @param endpoint The URL of the Container Registry instance. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ContainerRegistryBlobClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the repository name for the Azure Container Registry Blob instance. * * @param repositoryName The URL of the Container Registry instance. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder repository(String repositoryName) { this.repositoryName = repositoryName; return this; } /** * Sets the audience for the Azure Container Registry service. * * @param audience ARM management scope associated with the given registry. * @throws NullPointerException If {@code audience} is null. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder audience(ContainerRegistryAudience audience) { Objects.requireNonNull(audience, "'audience' can't be null"); this.audience = audience; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder credential(TokenCredential credential) { this.credential = credential; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * If {@code pipeline} is set, all settings other than {@link * to build {@link ContainerRegistryBlobClient} or {@link ContainerRegistryBlobAsyncClient}.<br> * </p> * * This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials. * In case you use your own pipeline you will have to create your own credential policy.<br> * * {For more information please see <a href="https: * * @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses. * @return The updated {@link ContainerRegistryClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service version and so * newer version of the client library may result in moving to a newer service version. * * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder serviceVersion(ContainerRegistryServiceVersion version) { this.version = version; return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param httpClient The HTTP client to use for requests. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link ContainerRegistryBlobClientBuilder} object */ public ContainerRegistryBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the configuration store that is used during construction of the service client. * * <p>The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store to be used. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p> If logLevel is not provided, HTTP request or response logging will not happen.</p> * * @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used to retry requests. * <p> * The default retry policy will be used if not provided {@link * build {@link ContainerRegistryBlobAsyncClient}. * * @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example, * {@link RetryPolicy} can be used to retry requests. * * @return The updated ContainerRegistryBlobClientBuilder object. */ public ContainerRegistryBlobClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * * @return The updated ContainerRegistryClientBuilder object. */ @Override public ContainerRegistryBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Adds a policy to the set of existing policies. * * @param policy The policy for service requests. * @return The updated ContainerRegistryBlobClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ public ContainerRegistryBlobClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Creates a {@link ContainerRegistryBlobAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link ContainerRegistryBlobAsyncClient} is created. * <p> * If {@link
class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline}
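A minimal sketch of how the builder could default the audience when none is supplied, assuming the library exposes a public-cloud constant along the lines of `ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD` (the constant name is an assumption here, not confirmed by this record):

```java
// Inside buildAsyncClient(): fall back to the public cloud audience instead of
// requiring callers to set one. The constant name is an assumption.
ContainerRegistryAudience effectiveAudience = (this.audience != null)
    ? this.audience
    : ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
```

With a fallback like this in place, the `Objects.requireNonNull(audience, ...)` guard can be removed, which is the change the method_body_after above shows.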
If this just returns a URL to the documentation, we should call this `getDocumentationLink()` or something similar. Users may expect the documentation itself to be returned when they see this method name paired with a `String` return type. #Resolved
public String getDocumentation() { return this.documentation; }
}
public String getDocumentation() { return this.documentation; }
class OciAnnotations { /* * Date and time on which the image was built (string, date-time as defined * by https: */ @JsonProperty(value = "org.opencontainers.image.created") private OffsetDateTime created; /* * Contact details of the people or organization responsible for the image. */ @JsonProperty(value = "org.opencontainers.image.authors") private String authors; /* * URL to find more information on the image. */ @JsonProperty(value = "org.opencontainers.image.url") private String url; /* * URL to get documentation on the image. */ @JsonProperty(value = "org.opencontainers.image.documentation") private String documentation; /* * URL to get source code for building the image. */ @JsonProperty(value = "org.opencontainers.image.source") private String source; /* * Version of the packaged software. The version MAY match a label or tag * in the source code repository, may also be Semantic * versioning-compatible */ @JsonProperty(value = "org.opencontainers.image.version") private String version; /* * Source control revision identifier for the packaged software. */ @JsonProperty(value = "org.opencontainers.image.revision") private String revision; /* * Name of the distributing entity, organization or individual. */ @JsonProperty(value = "org.opencontainers.image.vendor") private String vendor; /* * License(s) under which contained software is distributed as an SPDX * License Expression. */ @JsonProperty(value = "org.opencontainers.image.licenses") private String licenses; /* * Name of the reference for a target. */ @JsonProperty(value = "org.opencontainers.image.ref.name") private String name; /* * Human-readable title of the image */ @JsonProperty(value = "org.opencontainers.image.title") private String title; /* * Human-readable description of the software packaged in the image */ @JsonProperty(value = "org.opencontainers.image.description") private String description; /* * Additional information provided through arbitrary metadata. */ @JsonIgnore private Map<String, Object> additionalProperties; /** * Get the created property: Date and time on which the image was built (string, date-time as defined by * https: * * @return the created value. */ public OffsetDateTime getCreated() { return this.created; } /** * Set the created property: Date and time on which the image was built (string, date-time as defined by * https: * * @param created the created value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setCreated(OffsetDateTime created) { this.created = created; return this; } /** * Get the authors property: Contact details of the people or organization responsible for the image. * * @return the authors value. */ public String getAuthors() { return this.authors; } /** * Set the authors property: Contact details of the people or organization responsible for the image. * * @param authors the authors value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setAuthors(String authors) { this.authors = authors; return this; } /** * Get the url property: URL to find more information on the image. * * @return the url value. */ public String getUrl() { return this.url; } /** * Set the url property: URL to find more information on the image. * * @param url the url value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setUrl(String url) { this.url = url; return this; } /** * Get the documentation property: URL to get documentation on the image. * * @return the documentation value. 
*/ /** * Set the documentation property: URL to get documentation on the image. * * @param documentation the documentation value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setDocumentation(String documentation) { this.documentation = documentation; return this; } /** * Get the source property: URL to get source code for building the image. * * @return the source value. */ public String getSource() { return this.source; } /** * Set the source property: URL to get source code for building the image. * * @param source the source value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setSource(String source) { this.source = source; return this; } /** * Get the version property: Version of the packaged software. The version MAY match a label or tag in the source * code repository, may also be Semantic versioning-compatible. * * @return the version value. */ public String getVersion() { return this.version; } /** * Set the version property: Version of the packaged software. The version MAY match a label or tag in the source * code repository, may also be Semantic versioning-compatible. * * @param version the version value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setVersion(String version) { this.version = version; return this; } /** * Get the revision property: Source control revision identifier for the packaged software. * * @return the revision value. */ public String getRevision() { return this.revision; } /** * Set the revision property: Source control revision identifier for the packaged software. * * @param revision the revision value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setRevision(String revision) { this.revision = revision; return this; } /** * Get the vendor property: Name of the distributing entity, organization or individual. * * @return the vendor value. */ public String getVendor() { return this.vendor; } /** * Set the vendor property: Name of the distributing entity, organization or individual. * * @param vendor the vendor value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setVendor(String vendor) { this.vendor = vendor; return this; } /** * Get the licenses property: License(s) under which contained software is distributed as an SPDX License * Expression. * * @return the licenses value. */ public String getLicenses() { return this.licenses; } /** * Set the licenses property: License(s) under which contained software is distributed as an SPDX License * Expression. * * @param licenses the licenses value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setLicenses(String licenses) { this.licenses = licenses; return this; } /** * Get the name property: Name of the reference for a target. * * @return the name value. */ public String getName() { return this.name; } /** * Set the name property: Name of the reference for a target. * * @param name the name value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setName(String name) { this.name = name; return this; } /** * Get the title property: Human-readable title of the image. * * @return the title value. */ public String getTitle() { return this.title; } /** * Set the title property: Human-readable title of the image. * * @param title the title value to set. * @return the OciAnnotations object itself. 
*/ public OciAnnotations setTitle(String title) { this.title = title; return this; } /** * Get the description property: Human-readable description of the software packaged in the image. * * @return the description value. */ public String getDescription() { return this.description; } /** * Set the description property: Human-readable description of the software packaged in the image. * * @param description the description value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setDescription(String description) { this.description = description; return this; } /** * Get the additionalProperties property: Additional information provided through arbitrary metadata. * * @return the additionalProperties value. */ @JsonAnyGetter public Map<String, Object> getAdditionalProperties() { return this.additionalProperties; } /** * Set the additionalProperties property: Additional information provided through arbitrary metadata. * * @param additionalProperties the additionalProperties value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setAdditionalProperties(Map<String, Object> additionalProperties) { this.additionalProperties = additionalProperties; return this; } @JsonAnySetter void setAdditionalProperties(String key, Object value) { if (additionalProperties == null) { additionalProperties = new HashMap<>(); } additionalProperties.put(key, value); } }
class OciAnnotations { /* * Date and time on which the image was built (string, date-time as defined * by https: */ @JsonProperty(value = "org.opencontainers.image.created") private OffsetDateTime created; /* * Contact details of the people or organization responsible for the image. */ @JsonProperty(value = "org.opencontainers.image.authors") private String authors; /* * URL to find more information on the image. */ @JsonProperty(value = "org.opencontainers.image.url") private String url; /* * URL to get documentation on the image. */ @JsonProperty(value = "org.opencontainers.image.documentation") private String documentation; /* * URL to get source code for building the image. */ @JsonProperty(value = "org.opencontainers.image.source") private String source; /* * Version of the packaged software. The version MAY match a label or tag * in the source code repository, may also be Semantic * versioning-compatible */ @JsonProperty(value = "org.opencontainers.image.version") private String version; /* * Source control revision identifier for the packaged software. */ @JsonProperty(value = "org.opencontainers.image.revision") private String revision; /* * Name of the distributing entity, organization or individual. */ @JsonProperty(value = "org.opencontainers.image.vendor") private String vendor; /* * License(s) under which contained software is distributed as an SPDX * License Expression. */ @JsonProperty(value = "org.opencontainers.image.licenses") private String licenses; /* * Name of the reference for a target. */ @JsonProperty(value = "org.opencontainers.image.ref.name") private String name; /* * Human-readable title of the image */ @JsonProperty(value = "org.opencontainers.image.title") private String title; /* * Human-readable description of the software packaged in the image */ @JsonProperty(value = "org.opencontainers.image.description") private String description; /* * Additional information provided through arbitrary metadata. */ @JsonIgnore private Map<String, Object> additionalProperties; /** * Get the created property: Date and time on which the image was built (string, date-time as defined by * https: * * @return the created value. */ public OffsetDateTime getCreated() { return this.created; } /** * Set the created property: Date and time on which the image was built (string, date-time as defined by * https: * * @param created the created value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setCreated(OffsetDateTime created) { this.created = created; return this; } /** * Get the authors property: Contact details of the people or organization responsible for the image. * * @return the authors value. */ public String getAuthors() { return this.authors; } /** * Set the authors property: Contact details of the people or organization responsible for the image. * * @param authors the authors value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setAuthors(String authors) { this.authors = authors; return this; } /** * Get the url property: URL to find more information on the image. * * @return the url value. */ public String getUrl() { return this.url; } /** * Set the url property: URL to find more information on the image. * * @param url the url value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setUrl(String url) { this.url = url; return this; } /** * Get the documentation property: URL to get documentation on the image. * * @return the documentation value. 
*/ /** * Set the documentation property: URL to get documentation on the image. * * @param documentation the documentation value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setDocumentation(String documentation) { this.documentation = documentation; return this; } /** * Get the source property: URL to get source code for building the image. * * @return the source value. */ public String getSource() { return this.source; } /** * Set the source property: URL to get source code for building the image. * * @param source the source value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setSource(String source) { this.source = source; return this; } /** * Get the version property: Version of the packaged software. The version MAY match a label or tag in the source * code repository, may also be Semantic versioning-compatible. * * @return the version value. */ public String getVersion() { return this.version; } /** * Set the version property: Version of the packaged software. The version MAY match a label or tag in the source * code repository, may also be Semantic versioning-compatible. * * @param version the version value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setVersion(String version) { this.version = version; return this; } /** * Get the revision property: Source control revision identifier for the packaged software. * * @return the revision value. */ public String getRevision() { return this.revision; } /** * Set the revision property: Source control revision identifier for the packaged software. * * @param revision the revision value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setRevision(String revision) { this.revision = revision; return this; } /** * Get the vendor property: Name of the distributing entity, organization or individual. * * @return the vendor value. */ public String getVendor() { return this.vendor; } /** * Set the vendor property: Name of the distributing entity, organization or individual. * * @param vendor the vendor value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setVendor(String vendor) { this.vendor = vendor; return this; } /** * Get the licenses property: License(s) under which contained software is distributed as an SPDX License * Expression. * * @return the licenses value. */ public String getLicenses() { return this.licenses; } /** * Set the licenses property: License(s) under which contained software is distributed as an SPDX License * Expression. * * @param licenses the licenses value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setLicenses(String licenses) { this.licenses = licenses; return this; } /** * Get the name property: Name of the reference for a target. * * @return the name value. */ public String getName() { return this.name; } /** * Set the name property: Name of the reference for a target. * * @param name the name value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setName(String name) { this.name = name; return this; } /** * Get the title property: Human-readable title of the image. * * @return the title value. */ public String getTitle() { return this.title; } /** * Set the title property: Human-readable title of the image. * * @param title the title value to set. * @return the OciAnnotations object itself. 
*/ public OciAnnotations setTitle(String title) { this.title = title; return this; } /** * Get the description property: Human-readable description of the software packaged in the image. * * @return the description value. */ public String getDescription() { return this.description; } /** * Set the description property: Human-readable description of the software packaged in the image. * * @param description the description value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setDescription(String description) { this.description = description; return this; } /** * Get the additionalProperties property: Additional information provided through arbitrary metadata. * * @return the additionalProperties value. */ @JsonAnyGetter public Map<String, Object> getAdditionalProperties() { return this.additionalProperties; } /** * Set the additionalProperties property: Additional information provided through arbitrary metadata. * * @param additionalProperties the additionalProperties value to set. * @return the OciAnnotations object itself. */ public OciAnnotations setAdditionalProperties(Map<String, Object> additionalProperties) { this.additionalProperties = additionalProperties; return this; } @JsonAnySetter void setAdditionalProperties(String key, Object value) { if (additionalProperties == null) { additionalProperties = new HashMap<>(); } additionalProperties.put(key, value); } }
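If the rename suggested in the comment were applied, the accessor could keep the same field and JSON mapping and change only its name, making the URL contract explicit; a sketch:

```java
/**
 * Get the documentation property: URL to get documentation on the image.
 *
 * @return the documentation link value.
 */
public String getDocumentationLink() {
    return this.documentation;
}
```

Since the value is carried by the `org.opencontainers.image.documentation` annotation, only the Java-facing name changes; the wire format is untouched.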
Why does this method take `Flux<ByteBuffer>` as input? The public APIs take either `BinaryData` or `OciManifest`, which is then converted to `Flux<ByteBuffer>`; that conversion may not be required if we use `ByteBuffer` as the parameter for this method. `BinaryData` can then be converted using `BinaryData.toByteBuffer()`, and `OciManifest` can be converted using:
```java
ByteBuffer byteBuffer = ByteBuffer.wrap(this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON));
```
Suggesting this because the doc says the content will be fully loaded into memory, so `BinaryData` can be mapped to `ByteBuffer` instead of a `Flux`. #Resolved
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } BufferedFlux playableFlux = new BufferedFlux(); return data.concatMap(playableFlux::write) .then(Mono.defer(() -> Mono.just(playableFlux.getDigest()))) .flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync( repositoryName, dig, playableFlux.flush(), playableFlux.getSize(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)) .flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); }
new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data}
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(Flux.just(ByteBuffer.wrap(bytes)), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new RuntimeException(exception.getMessage())); } } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toFluxByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(Flux<ByteBuffer> data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } BufferedFlux playableFlux = new BufferedFlux(); return data.concatMap(playableFlux::write) .then(Mono.defer(() -> Mono.just(playableFlux.getDigest()))) .flatMap(dig -> this.registriesImpl.createManifestWithResponseAsync( repositoryName, dig, playableFlux.flush(), playableFlux.getSize(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context)) .flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toFluxByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(Flux<ByteBuffer> data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } BufferedFlux playableFlux = new BufferedFlux(); AtomicReference<String> digest = new AtomicReference<>(); return data.concatMap(playableFlux::write) .then(Mono.defer(() -> Mono.just(playableFlux.getDigest()))) .flatMap(dig -> { digest.set(dig); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context); }) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), playableFlux.flush(), playableFlux.getSize(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest.get(), trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context) .flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); Response<DownloadBlobResult> blobResult = new ResponseBase<>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), null, new DownloadBlobResult() .setContent(streamResponse.getValue()) .setDigest(resDigest)); return Mono.just(blobResult); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } static final class BufferedFlux { int size; ByteBuffer byteBuffer; int getSize() { return this.size; } String getDigest() { return UtilsImpl.computeDigest(byteBuffer); } Flux<Void> write(ByteBuffer buffer) { size += buffer.remaining(); byteBuffer = buffer; return Flux.empty(); } Flux<ByteBuffer> flush() { return Flux.just(byteBuffer); } } }
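Note that `BufferedFlux.write` above accumulates `size` but overwrites `byteBuffer` on every emission, so the digest and replay are only correct when the source flux emits a single buffer; that is one more argument for the `ByteBuffer` parameter the comment suggests. For reference, a hedged sketch of what a helper like `UtilsImpl.computeDigest` plausibly does (the real helper is not shown in this record); OCI content digests are SHA-256 over the raw bytes, rendered as `sha256:<lowercase hex>`:

```java
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

final class DigestSketch {
    // Produces an OCI-style digest string such as "sha256:9f86d081...".
    static String computeDigest(ByteBuffer buffer) {
        try {
            MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
            // Read-only duplicate so the caller's position is untouched.
            sha256.update(buffer.asReadOnlyBuffer());
            StringBuilder hex = new StringBuilder("sha256:");
            for (byte b : sha256.digest()) {
                hex.append(Character.forDigit((b >> 4) & 0xF, 16))
                   .append(Character.forDigit(b & 0xF, 16));
            }
            return hex.toString();
        } catch (NoSuchAlgorithmException e) {
            throw new IllegalStateException("SHA-256 is required on every JVM", e);
        }
    }
}
```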
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. 
* @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
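One nuance in the updated code above: `uploadManifest(OciManifest)` now goes through `BinaryData.fromObject(manifest)`, which serializes with azure-core's default JSON serializer rather than this client's `getSerializerAdapter()`. If the two serializers ever disagree (property ordering, null handling), the bytes, and therefore the computed digest, differ. A hedged sketch of a check that could live in a unit test; `registryImplClient` stands in for the client's internal field, reachable via a package-private hook in a real test:

```java
// Hypothetical test method; imports and test scaffolding elided.
void manifestBytesMatchAcrossSerializers() throws IOException {
    OciManifest manifest = new OciManifest().setSchemaVersion(2);

    // What the updated uploadManifest(OciManifest) overload sends:
    byte[] viaBinaryData = BinaryData.fromObject(manifest).toBytes();

    // What the pre-review code sent via the generated serializer adapter:
    byte[] viaAdapter = registryImplClient.getSerializerAdapter()
        .serializeToBytes(manifest, SerializerEncoding.JSON);

    // Differing bytes would mean differing manifest digests between the paths.
    assert Arrays.equals(viaBinaryData, viaAdapter);
}
```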
`audience` should be optional and `null` should be allowed. #Resolved
public ContainerRegistryBlobClientBuilder audience(ContainerRegistryAudience audience) { Objects.requireNonNull(audience, "'audience' can't be null"); this.audience = audience; return this; }
Objects.requireNonNull(audience, "'audience' can't be null");
public ContainerRegistryBlobClientBuilder audience(ContainerRegistryAudience audience) { this.audience = audience; return this; }
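With the null check removed, the builder presumably falls back to a default audience at build time when none was set. A minimal sketch of that fallback; the choice of `AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD` as the default is an assumption, not something this record confirms:

```java
// Hypothetical resolution inside buildClient()/buildAsyncClient():
ContainerRegistryAudience effectiveAudience = (this.audience != null)
    ? this.audience
    : ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD; // assumed default
```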
class ContainerRegistryBlobClientBuilder implements ConfigurationTrait<ContainerRegistryBlobClientBuilder>, EndpointTrait<ContainerRegistryBlobClientBuilder>, HttpTrait<ContainerRegistryBlobClientBuilder>, TokenCredentialTrait<ContainerRegistryBlobClientBuilder> { private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private Configuration configuration; private String endpoint; private HttpClient httpClient; private TokenCredential credential; private HttpPipeline httpPipeline; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private RetryOptions retryOptions; private ContainerRegistryServiceVersion version; private ContainerRegistryAudience audience; private String repositoryName; /** * Sets the service endpoint for the Azure Container Registry instance. * * @param endpoint The URL of the Container Registry instance. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ContainerRegistryBlobClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the repository name for the Azure Container Registry Blob instance. * * @param repositoryName The URL of the Container Registry instance. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder repository(String repositoryName) { this.repositoryName = repositoryName; return this; } /** * Sets the audience for the Azure Container Registry service. * * @param audience ARM management scope associated with the given registry. * @throws NullPointerException If {@code audience} is null. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder credential(TokenCredential credential) { this.credential = credential; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * If {@code pipeline} is set, all settings other than {@link * to build {@link ContainerRegistryBlobClient} or {@link ContainerRegistryBlobAsyncClient}.<br> * </p> * * This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials. * In case you use your own pipeline you will have to create your own credential policy.<br> * * {For more information please see <a href="https: * * @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses. * @return The updated {@link ContainerRegistryClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service version and so * newer version of the client library may result in moving to a newer service version. * * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder serviceVersion(ContainerRegistryServiceVersion version) { this.version = version; return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param httpClient The HTTP client to use for requests. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link ContainerRegistryBlobClientBuilder} object */ public ContainerRegistryBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the configuration store that is used during construction of the service client. * * <p>The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store to be used. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p> If logLevel is not provided, HTTP request or response logging will not happen.</p> * * @param httpLogOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used to retry requests. * <p> * The default retry policy will be used if not provided {@link * build {@link ContainerRegistryBlobAsyncClient}. * * @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example, * {@link RetryPolicy} can be used to retry requests. * * @return The updated ContainerRegistryBlobClientBuilder object. */ public ContainerRegistryBlobClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * * @return The updated ContainerRegistryClientBuilder object. */ @Override public ContainerRegistryBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Adds a policy to the set of existing policies. * * @param policy The policy for service requests. * @return The updated ContainerRegistryBlobClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ public ContainerRegistryBlobClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Creates a {@link ContainerRegistryBlobAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link ContainerRegistryBlobAsyncClient} is created. * <p> * If {@link
class ContainerRegistryBlobClientBuilder implements ConfigurationTrait<ContainerRegistryBlobClientBuilder>, EndpointTrait<ContainerRegistryBlobClientBuilder>, HttpTrait<ContainerRegistryBlobClientBuilder>, TokenCredentialTrait<ContainerRegistryBlobClientBuilder> { private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private Configuration configuration; private String endpoint; private HttpClient httpClient; private TokenCredential credential; private HttpPipeline httpPipeline; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private RetryOptions retryOptions; private ContainerRegistryServiceVersion version; private ContainerRegistryAudience audience; private String repositoryName; /** * Sets the service endpoint for the Azure Container Registry instance. * * @param endpoint The URL of the Container Registry instance. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL. */ @Override public ContainerRegistryBlobClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the repository name for the Azure Container Registry Blob instance. * * @param repositoryName The URL of the Container Registry instance. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder repository(String repositoryName) { this.repositoryName = repositoryName; return this; } /** * Sets the audience for the Azure Container Registry service. * * @param audience ARM management scope associated with the given registry. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder credential(TokenCredential credential) { this.credential = credential; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * If {@code pipeline} is set, all settings other than {@link * to build {@link ContainerRegistryBlobClient} or {@link ContainerRegistryBlobAsyncClient}.<br> * </p> * * This service takes dependency on an internal policy which converts Azure token credentials into Azure Container Registry specific service credentials. * In case you use your own pipeline you will have to create your own credential policy.<br> * * {For more information please see <a href="https: * * @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link ContainerRegistryServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service version and so * newer version of the client library may result in moving to a newer service version. * * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ public ContainerRegistryBlobClientBuilder serviceVersion(ContainerRegistryServiceVersion version) { this.version = version; return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. 
* * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * * @return the updated {@link ContainerRegistryBlobClientBuilder} object * @see HttpClientOptions */ @Override public ContainerRegistryBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the configuration store that is used during construction of the service client. * * <p>The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store to be used. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests * to and from the service. * @return The updated {@link ContainerRegistryBlobClientBuilder} object. */ @Override public ContainerRegistryBlobClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used to retry requests. * <p> * The default retry policy will be used if not provided {@link * build {@link ContainerRegistryBlobAsyncClient}. * * @param retryPolicy The {@link HttpPipelinePolicy} that will be used to retry requests. For example, * {@link RetryPolicy} can be used to retry requests. * * @return The updated ContainerRegistryBlobClientBuilder object. */ public ContainerRegistryBlobClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * * @return The updated ContainerRegistryBlobClientBuilder object. */ @Override public ContainerRegistryBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param policy A {@link HttpPipelinePolicy pipeline policy}. * @return The updated ContainerRegistryBlobClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ @Override public ContainerRegistryBlobClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Creates a {@link ContainerRegistryBlobAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link ContainerRegistryBlobAsyncClient} is created. * <p> * If {@link
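Putting the builder surface above together, a hedged usage sketch (registry URL and repository are placeholders, and `audience` is simply omitted now that it is optional):

```java
ContainerRegistryBlobAsyncClient client = new ContainerRegistryBlobClientBuilder()
    .endpoint("https://myregistry.azurecr.io")   // placeholder registry
    .repository("hello-world")                   // placeholder repository
    .credential(new DefaultAzureCredentialBuilder().build())
    .buildAsyncClient();
```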
Will the blobs ever be greater than 2 GB? If so, this method will fail, as `ByteBuffer` has a 2 GB limit (its capacity is an `int`). #Resolved
public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); }
return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono);
public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); }
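The 2 GB ceiling exists because `ByteBuffer` addresses its capacity with an `int`, so `BinaryData.toByteBuffer()` cannot represent larger layers. A hedged sketch of the streaming alternative using azure-core's `FluxUtil.readFile`, which emits bounded chunks instead of one giant buffer; per the docs above the client does not support chunked uploads yet, so this only illustrates the shape of a fix:

```java
import com.azure.core.util.FluxUtil;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import reactor.core.publisher.Flux;

class LargeLayerSource {
    static Flux<ByteBuffer> chunks(String path) throws IOException {
        AsynchronousFileChannel channel = AsynchronousFileChannel.open(
            Paths.get(path), StandardOpenOption.READ);
        // The file is emitted as a sequence of small ByteBuffers; no single
        // buffer ever holds the whole (possibly > 2 GB) layer in memory.
        return FluxUtil.readFile(channel);
    }
}
```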
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new UncheckedIOException(exception)); } } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. 
* @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult().setContent(binaryData).setDigest(resDigest), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. 
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. 
* @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
Random question: is the digest always needed, or can it be optional? #Resolved
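For context on why the client computes the digest eagerly: in OCI registries the digest is the content address of the manifest, so pinning it client-side lets the service verify exactly what it received. Below is a minimal sketch of what a helper like `UtilsImpl.computeDigest` presumably does (the `DigestSketch` class, its name, and its exact output format are assumptions for illustration, not the SDK's actual implementation):

```java
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Hypothetical standalone helper: OCI registries address content as
// "sha256:<lowercase hex>", computed over the exact bytes to be uploaded.
final class DigestSketch {
    static String computeDigest(ByteBuffer data) {
        try {
            MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
            // Hash a read-only view so the caller's buffer position is untouched.
            sha256.update(data.asReadOnlyBuffer());
            StringBuilder hex = new StringBuilder("sha256:");
            for (byte b : sha256.digest()) {
                hex.append(String.format("%02x", b));
            }
            return hex.toString();
        } catch (NoSuchAlgorithmException e) {
            throw new IllegalStateException("SHA-256 is required by the OCI spec", e);
        }
    }
}
```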
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); }
String digest = UtilsImpl.computeDigest(data);
return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data}
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new UncheckedIOException(exception)); } } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult().setContent(binaryData).setDigest(resDigest), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. 
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. 
* @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
nit: just use `map` here so you don't need to wrap the value in `Mono.just` #Resolved
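The nit is a common Reactor simplification: when the lambda passed to `flatMap` does no asynchronous work and merely wraps a synchronously built value in `Mono.just`, `map` expresses the same transformation without allocating the intermediate publisher. A minimal sketch (class and method names are illustrative, not from the SDK):

```java
import reactor.core.publisher.Mono;

// map(f) is equivalent to flatMap(r -> Mono.just(f(r))) when f is synchronous.
class MapVsFlatMapSketch {
    static Mono<String> withFlatMap(Mono<Integer> source) {
        // Redundant: the value is already available, yet we wrap it in a Mono.
        return source.flatMap(value -> Mono.just("result:" + value));
    }

    static Mono<String> withMap(Mono<Integer> source) {
        // Same behavior, simpler, no intermediate Mono allocation.
        return source.map(value -> "result:" + value);
    }
}
```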
Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); }
context).flatMap(response -> {
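Applied to the target line above, the change is roughly the following (a sketch, not a verified diff; the lambda's trailing `return Mono.just(res);` also becomes `return res;`, as the post-change method body shows):

```suggestion
            context).map(response -> {
```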
return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data}
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new UncheckedIOException(exception)); } } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).flatMap(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult().setContent(binaryData).setDigest(resDigest), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. 
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
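The blob upload in the chunk above is the registry's standard three-step push, driven through the generated `blobsImpl` client. An annotated outline of that same chain, with the reactive wiring simplified (this is a sketch of the flow already shown, not a separate implementation):

```java
// Sketch of the three-step blob push performed by uploadBlobWithResponse:
// 1) start an upload session; the Location header identifies the session,
// 2) PATCH the whole payload to that location as a single chunk,
// 3) complete the upload by PUTting the sha256 digest computed client-side.
blobsImpl.startUploadWithResponseAsync(repositoryName, context)
    .flatMap(start -> blobsImpl.uploadChunkWithResponseAsync(
        trimNextLink(start.getDeserializedHeaders().getLocation()),
        Flux.just(data), data.remaining(), context))
    .flatMap(chunk -> blobsImpl.completeUploadWithResponseAsync(
        digest, trimNextLink(chunk.getDeserializedHeaders().getLocation()),
        null, 0L, context));
```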
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. 
* @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
```suggestion
return uploadManifest(BinaryData.fromObject(manifest));
```
#Resolved
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new UncheckedIOException(exception)); } }
}
public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); }
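With the suggestion applied, `BinaryData.fromObject(manifest)` hands serialization to azure-core's default JSON serializer, so the checked `IOException` handling around `serializeToBytes` disappears entirely. A hedged usage sketch (the `client` variable is illustrative; `setSchemaVersion` is the only `OciManifest` setter confirmed by the surrounding code):

```java
// Assumes an already-built ContainerRegistryBlobAsyncClient named `client`.
OciManifest manifest = new OciManifest().setSchemaVersion(2);
client.uploadManifest(manifest)
    .subscribe(
        result -> System.out.println("Manifest uploaded."),
        error -> System.err.println("Upload failed: " + error));
```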
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult().setContent(binaryData).setDigest(resDigest), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. 
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
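One small helper worth noting from the chunk above: `trimNextLink` strips a leading slash from the `Location` header before it is handed back to the generated client, which appends it to the base URL. Its behavior, on an illustrative upload-session path:

```java
// "/v2/<repo>/blobs/uploads/<id>"  ->  "v2/<repo>/blobs/uploads/<id>"
// (already-relative values pass through unchanged)
assert "v2/repo/blobs/uploads/abc".equals(trimNextLink("/v2/repo/blobs/uploads/abc"));
assert "v2/repo/blobs/uploads/abc".equals(trimNextLink("v2/repo/blobs/uploads/abc"));
```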
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. 
* @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
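Both upload paths compute the content digest client-side via `UtilsImpl.computeDigest(ByteBuffer)` before calling the service. Its implementation is not shown in this snippet; a plausible minimal sketch follows, under the assumptions that it produces the canonical `sha256:<hex>` form and must not consume the caller's buffer:

```java
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Hedged sketch of a computeDigest helper; the real UtilsImpl version may differ.
static String computeDigest(ByteBuffer data) {
    try {
        MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
        // Hash a read-only duplicate so the caller's position/limit stay untouched.
        sha256.update(data.asReadOnlyBuffer());
        StringBuilder hex = new StringBuilder("sha256:");
        for (byte b : sha256.digest()) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    } catch (NoSuchAlgorithmException e) {
        throw new IllegalStateException("SHA-256 is required on all Java platforms", e);
    }
}
```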
It seems a bit odd to validate the response from the service this way. When does the service ever return a digest that was not requested? #Resolved
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); }
if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) {
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new UncheckedIOException(exception)); } } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. 
* @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { ).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult().setContent(binaryData).setDigest(resDigest), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { ).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
Why would the service return a digest that doesn't match the requested digest? #Resolved
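Editor's note: since `tagOrDigest` may be either a tag or a digest, the check under review accepts the response when either the returned `Docker-Content-Digest` header or the returned tag equals the requested value; a mismatch on both would suggest the service resolved something other than what was asked for (for example, a stale or re-pointed tag). A standalone restatement of that condition, with hypothetical names:

```java
import java.util.Objects;

final class ManifestResponseCheck {
    // Accept the response if either identifier round-trips: the digest header
    // (when the caller pulled by digest) or the tag (when pulled by tag).
    static boolean matchesRequest(String tagOrDigest, String responseDigestHeader, String responseTag) {
        return Objects.equals(responseDigestHeader, tagOrDigest)
            || Objects.equals(responseTag, tagOrDigest);
    }
}
```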
Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); }
return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest."));
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<OciManifest> res = new ResponseBase<Void, OciManifest>( response.getRequest(), response.getStatusCode(), response.getHeaders(), ociManifest, null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } try { byte[] bytes = this.registryImplClient.getSerializerAdapter().serializeToBytes(manifest, SerializerEncoding.JSON); return withContext(context -> this.uploadManifestWithResponse(ByteBuffer.wrap(bytes), context)).flatMap(FluxUtil::toMono); } catch (IOException exception) { return monoError(logger, new UncheckedIOException(exception)); } } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. 
* @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { ).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult().setContent(binaryData).setDigest(resDigest), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, digest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<OciManifest> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<OciManifest>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { ).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
I think calling this method on every empty page, whether empty-page diagnostics logging is enabled or not, is not good. We should put the check before the call (or at the very top of the method), so that we never even enter the method when the flag is disabled; that avoids pushing an unnecessary stack frame and saves a small amount of computation per page.
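To make the suggestion concrete, here is a minimal sketch of the guarded call site. This is not standalone code; it is the shape the change takes inside apply(), and the accessor chain is the one that appears verbatim in the updated method body later in this record:

```java
// Check the flag at the call site so logEmptyPageDiagnostics is never entered
// when empty-page diagnostics are disabled (no extra stack frame, no wasted work).
if (ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .isEmptyPageDiagnosticsEnabled(cosmosQueryRequestOptions)) {
    logEmptyPageDiagnostics(
        cosmosDiagnostics,
        this.correlatedActivityId,
        documentProducerFeedResponse.pageResult.getActivityId());
}
```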
public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) { return source.filter(documentProducerFeedResponse -> { if (documentProducerFeedResponse.pageResult.getResults().isEmpty() && !ModelBridgeInternal .getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) { tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge()); ConcurrentMap<String, QueryMetrics> currentQueryMetrics = BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult); QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics); cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics(); logEmptyPageDiagnostics( cosmosDiagnostics, this.cosmosQueryRequestOptions, this.correlatedActivityId, documentProducerFeedResponse.pageResult.getActivityId()); return false; } return true; }).map(documentProducerFeedResponse -> { if (!emptyPageQueryMetricsMap.isEmpty()) { ConcurrentMap<String, QueryMetrics> currentQueryMetrics = BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult); QueryMetrics.mergeQueryMetricsMap(currentQueryMetrics, emptyPageQueryMetricsMap); emptyPageQueryMetricsMap.clear(); } double charge = tracker.getAndResetCharge(); if (charge > 0) { return new ValueHolder<>(plusCharge(documentProducerFeedResponse, charge)); } else { return new ValueHolder<>(documentProducerFeedResponse); } }).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> { DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v; ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>( this.previousPage, documentProducerFeedResponse); this.previousPage = documentProducerFeedResponse; return previousCurrent; }).skip(1).map(currentNext -> { DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left; DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right; String compositeContinuationToken; String backendContinuationToken = current.pageResult.getContinuationToken(); if (backendContinuationToken == null) { if (next == null) { compositeContinuationToken = null; } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(null, next.sourceFeedRange.getRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken( backendContinuationToken, current.sourceFeedRange.getRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } DocumentProducer<T>.DocumentProducerFeedResponse page; page = current; page = this.addCompositeContinuationToken(page, compositeContinuationToken); return page; }).map(documentProducerFeedResponse -> { return documentProducerFeedResponse.pageResult; }).switchIfEmpty(Flux.defer(() -> { return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(), headerResponse(tracker.getAndResetCharge()), emptyPageQueryMetricsMap, null, false, false, cosmosDiagnostics)); })); }
logEmptyPageDiagnostics(
Combines the query metrics accumulated from previous empty pages with the current non-empty page's query metrics: if (!emptyPageQueryMetricsMap.isEmpty()) { ConcurrentMap<String, QueryMetrics> currentQueryMetrics = BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult); QueryMetrics.mergeQueryMetricsMap(currentQueryMetrics, emptyPageQueryMetricsMap); emptyPageQueryMetricsMap.clear(); }
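The snippet implements a carry-forward accumulator: metrics from pages filtered out as empty pile up in a map and are folded into the next emitted page, after which the accumulator is cleared so nothing is double counted. A self-contained, hypothetical demo of the same pattern with plain maps (names invented for illustration):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Carry-forward pattern: accumulate metrics from filtered (empty) pages,
// merge them into the next non-empty page, then clear the accumulator.
public final class MetricsCarryForwardDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, Long> emptyPageMetrics = new ConcurrentHashMap<>();

        // Two empty pages contribute per-partition counts.
        emptyPageMetrics.merge("partition-0", 5L, Long::sum);
        emptyPageMetrics.merge("partition-0", 7L, Long::sum);

        // First non-empty page arrives with its own metrics.
        ConcurrentMap<String, Long> currentMetrics = new ConcurrentHashMap<>();
        currentMetrics.put("partition-0", 3L);

        if (!emptyPageMetrics.isEmpty()) {
            emptyPageMetrics.forEach((k, v) -> currentMetrics.merge(k, v, Long::sum));
            emptyPageMetrics.clear(); // avoid double counting on later pages
        }

        System.out.println(currentMetrics); // {partition-0=15}
    }
}
```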
class EmptyPagesFilterTransformer<T extends Resource> implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> { private final RequestChargeTracker tracker; private DocumentProducer<T>.DocumentProducerFeedResponse previousPage; private final CosmosQueryRequestOptions cosmosQueryRequestOptions; private final UUID correlatedActivityId; private ConcurrentMap<String, QueryMetrics> emptyPageQueryMetricsMap = new ConcurrentHashMap<>(); private CosmosDiagnostics cosmosDiagnostics; public EmptyPagesFilterTransformer(RequestChargeTracker tracker, CosmosQueryRequestOptions options, UUID correlatedActivityId) { if (tracker == null) { throw new IllegalArgumentException("Request Charge Tracker must not be null."); } this.tracker = tracker; this.previousPage = null; this.cosmosQueryRequestOptions = options; this.correlatedActivityId = correlatedActivityId; } private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge( DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, double charge) { FeedResponse<T> page = documentProducerFeedResponse.pageResult; Map<String, String> headers = new HashMap<>(page.getResponseHeaders()); double pageCharge = page.getRequestCharge(); pageCharge += charge; headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(pageCharge)); FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(), headers, BridgeInternal.queryMetricsFromFeedResponse(page), ModelBridgeInternal.getQueryPlanDiagnosticsContext(page), false, false, page.getCosmosDiagnostics()); documentProducerFeedResponse.pageResult = newPage; return documentProducerFeedResponse; } private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken( DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, String compositeContinuationToken) { FeedResponse<T> page = documentProducerFeedResponse.pageResult; Map<String, String> headers = new HashMap<>(page.getResponseHeaders()); headers.put(HttpConstants.HttpHeaders.CONTINUATION, compositeContinuationToken); FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(), headers, BridgeInternal.queryMetricsFromFeedResponse(page), ModelBridgeInternal.getQueryPlanDiagnosticsContext(page), false, false, page.getCosmosDiagnostics() ); documentProducerFeedResponse.pageResult = newPage; return documentProducerFeedResponse; } private static Map<String, String> headerResponse( double requestCharge) { return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(requestCharge)); } @Override public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) { return source.filter(documentProducerFeedResponse -> { if (documentProducerFeedResponse.pageResult.getResults().isEmpty() && !ModelBridgeInternal .getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) { tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge()); ConcurrentMap<String, QueryMetrics> currentQueryMetrics = BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult); QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics); cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics(); logEmptyPageDiagnostics( cosmosDiagnostics, this.cosmosQueryRequestOptions, this.correlatedActivityId, documentProducerFeedResponse.pageResult.getActivityId()); return false; } return 
true; }).map(documentProducerFeedResponse -> { double charge = tracker.getAndResetCharge(); if (charge > 0) { return new ValueHolder<>(plusCharge(documentProducerFeedResponse, charge)); } else { return new ValueHolder<>(documentProducerFeedResponse); } }).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> { DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v; ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>( this.previousPage, documentProducerFeedResponse); this.previousPage = documentProducerFeedResponse; return previousCurrent; }).skip(1).map(currentNext -> { DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left; DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right; String compositeContinuationToken; String backendContinuationToken = current.pageResult.getContinuationToken(); if (backendContinuationToken == null) { if (next == null) { compositeContinuationToken = null; } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(null, next.sourceFeedRange.getRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken( backendContinuationToken, current.sourceFeedRange.getRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } DocumentProducer<T>.DocumentProducerFeedResponse page; page = current; page = this.addCompositeContinuationToken(page, compositeContinuationToken); return page; }).map(documentProducerFeedResponse -> { return documentProducerFeedResponse.pageResult; }).switchIfEmpty(Flux.defer(() -> { return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(), headerResponse(tracker.getAndResetCharge()), emptyPageQueryMetricsMap, null, false, false, cosmosDiagnostics)); })); } }
class EmptyPagesFilterTransformer<T extends Resource> implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> { private final RequestChargeTracker tracker; private DocumentProducer<T>.DocumentProducerFeedResponse previousPage; private final CosmosQueryRequestOptions cosmosQueryRequestOptions; private final UUID correlatedActivityId; private ConcurrentMap<String, QueryMetrics> emptyPageQueryMetricsMap = new ConcurrentHashMap<>(); private CosmosDiagnostics cosmosDiagnostics; public EmptyPagesFilterTransformer(RequestChargeTracker tracker, CosmosQueryRequestOptions options, UUID correlatedActivityId) { if (tracker == null) { throw new IllegalArgumentException("Request Charge Tracker must not be null."); } this.tracker = tracker; this.previousPage = null; this.cosmosQueryRequestOptions = options; this.correlatedActivityId = correlatedActivityId; } private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge( DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, double charge) { FeedResponse<T> page = documentProducerFeedResponse.pageResult; Map<String, String> headers = new HashMap<>(page.getResponseHeaders()); double pageCharge = page.getRequestCharge(); pageCharge += charge; headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(pageCharge)); FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(), headers, BridgeInternal.queryMetricsFromFeedResponse(page), ModelBridgeInternal.getQueryPlanDiagnosticsContext(page), false, false, page.getCosmosDiagnostics()); documentProducerFeedResponse.pageResult = newPage; return documentProducerFeedResponse; } private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken( DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, String compositeContinuationToken) { FeedResponse<T> page = documentProducerFeedResponse.pageResult; Map<String, String> headers = new HashMap<>(page.getResponseHeaders()); headers.put(HttpConstants.HttpHeaders.CONTINUATION, compositeContinuationToken); FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(), headers, BridgeInternal.queryMetricsFromFeedResponse(page), ModelBridgeInternal.getQueryPlanDiagnosticsContext(page), false, false, page.getCosmosDiagnostics() ); documentProducerFeedResponse.pageResult = newPage; return documentProducerFeedResponse; } private static Map<String, String> headerResponse( double requestCharge) { return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(requestCharge)); } @Override public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) { return source.filter(documentProducerFeedResponse -> { if (documentProducerFeedResponse.pageResult.getResults().isEmpty() && !ModelBridgeInternal .getEmptyPagesAllowedFromQueryRequestOptions(this.cosmosQueryRequestOptions)) { tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge()); ConcurrentMap<String, QueryMetrics> currentQueryMetrics = BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult); QueryMetrics.mergeQueryMetricsMap(emptyPageQueryMetricsMap, currentQueryMetrics); cosmosDiagnostics = documentProducerFeedResponse.pageResult.getCosmosDiagnostics(); if (ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .isEmptyPageDiagnosticsEnabled(cosmosQueryRequestOptions)) { 
logEmptyPageDiagnostics( cosmosDiagnostics, this.correlatedActivityId, documentProducerFeedResponse.pageResult.getActivityId()); } return false; } return true; }).map(documentProducerFeedResponse -> { double charge = tracker.getAndResetCharge(); if (charge > 0) { return new ValueHolder<>(plusCharge(documentProducerFeedResponse, charge)); } else { return new ValueHolder<>(documentProducerFeedResponse); } }).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> { DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v; ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>( this.previousPage, documentProducerFeedResponse); this.previousPage = documentProducerFeedResponse; return previousCurrent; }).skip(1).map(currentNext -> { DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left; DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right; String compositeContinuationToken; String backendContinuationToken = current.pageResult.getContinuationToken(); if (backendContinuationToken == null) { if (next == null) { compositeContinuationToken = null; } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(null, next.sourceFeedRange.getRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken( backendContinuationToken, current.sourceFeedRange.getRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } DocumentProducer<T>.DocumentProducerFeedResponse page; page = current; page = this.addCompositeContinuationToken(page, compositeContinuationToken); return page; }).map(documentProducerFeedResponse -> { return documentProducerFeedResponse.pageResult; }).switchIfEmpty(Flux.defer(() -> { return Flux.just(BridgeInternal.createFeedResponseWithQueryMetrics(Utils.immutableListOf(), headerResponse(tracker.getAndResetCharge()), emptyPageQueryMetricsMap, null, false, false, cosmosDiagnostics)); })); } }
Delete unused field: `mockObjectMapper`.
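Since the field is declared but never referenced anywhere in the test class, the fix is just dropping its declaration. Expressed as a review suggestion spanning the two declaration lines (`@Mock` and `ObjectMapper mockObjectMapper;`), the empty body deletes them:

```suggestion
```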
public void setup() { MockitoAnnotations.openMocks(this); try { when(mockClosableHttpResponse.getStatusLine()) .thenReturn(new BasicStatusLine(new ProtocolVersion("", 0, 0), 200, "")); when(mockClosableHttpResponse.getEntity()).thenReturn(mockHttpEntity); when(mockHttpEntity.getContent()).thenReturn(mockInputStream); } catch (Exception e) { fail(); } }
when(mockClosableHttpResponse.getEntity()).thenReturn(mockHttpEntity);
public void setup() { MockitoAnnotations.openMocks(this); try { when(mockClosableHttpResponse.getStatusLine()) .thenReturn(new BasicStatusLine(new ProtocolVersion("", 0, 0), 200, "")); when(mockClosableHttpResponse.getEntity()).thenReturn(mockHttpEntity); when(mockHttpEntity.getContent()).thenReturn(mockInputStream); } catch (Exception e) { fail(); } }
class AppConfigurationBootstrapConfigurationTest { private static final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withPropertyValues(propPair(STORE_ENDPOINT_PROP, TEST_STORE_NAME)) .withConfiguration(AutoConfigurations.of(AppConfigurationBootstrapConfiguration.class)); @Mock private CloseableHttpResponse mockClosableHttpResponse; @Mock HttpEntity mockHttpEntity; @Mock InputStream mockInputStream; @Mock ObjectMapper mockObjectMapper; @Mock ClientStore clientStoreMock; @Before @Test public void iniConnectionStringSystemAssigned() throws Exception { contextRunner.withPropertyValues(propPair(FAIL_FAST_PROP, "false")) .run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class)); } @Test public void iniConnectionStringUserAssigned() throws Exception { contextRunner .withPropertyValues(propPair(FAIL_FAST_PROP, "false"), propPair("spring.cloud.azure.appconfiguration.managed-identity.client-id", "client-id")) .run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class)); } @Test public void propertySourceLocatorBeanCreated() throws Exception { contextRunner .withPropertyValues(propPair(CONN_STRING_PROP, TEST_CONN_STRING), propPair(FAIL_FAST_PROP, "false")) .run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class)); } @Test public void clientsBeanCreated() throws Exception { contextRunner .withPropertyValues(propPair(CONN_STRING_PROP, TEST_CONN_STRING), propPair(FAIL_FAST_PROP, "false")) .run(context -> assertThat(context).hasSingleBean(ClientStore.class)); } }
class AppConfigurationBootstrapConfigurationTest { private static final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withPropertyValues(propPair(STORE_ENDPOINT_PROP, TEST_STORE_NAME)) .withConfiguration(AutoConfigurations.of(AppConfigurationBootstrapConfiguration.class)); @Mock private CloseableHttpResponse mockClosableHttpResponse; @Mock HttpEntity mockHttpEntity; @Mock InputStream mockInputStream; @Before @Test public void iniConnectionStringSystemAssigned() { contextRunner.withPropertyValues(propPair(FAIL_FAST_PROP, "false")) .run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class)); } @Test public void iniConnectionStringUserAssigned() { contextRunner .withPropertyValues(propPair(FAIL_FAST_PROP, "false"), propPair("spring.cloud.azure.appconfiguration.managed-identity.client-id", "client-id")) .run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class)); } @Test public void propertySourceLocatorBeanCreated() { contextRunner .withPropertyValues(propPair(CONN_STRING_PROP, TEST_CONN_STRING), propPair(FAIL_FAST_PROP, "false")) .run(context -> assertThat(context).hasSingleBean(AppConfigurationPropertySourceLocator.class)); } @Test public void clientsBeanCreated() { contextRunner .withPropertyValues(propPair(CONN_STRING_PROP, TEST_CONN_STRING), propPair(FAIL_FAST_PROP, "false")) .run(context -> assertThat(context).hasSingleBean(ClientStore.class)); } }
Do we want to sleep during playback mode?
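One common way to avoid real waits when recordings are replayed is to guard the sleep on the test mode. A hypothetical helper, assuming the surrounding test base exposes azure-core-test's getTestMode() (returning TestMode.PLAYBACK/RECORD/LIVE); the helper name is invented for illustration:

```java
// Skip real waits in playback, where recorded responses are replayed and
// wall-clock delays add nothing. Assumes getTestMode() is available from
// the test base (azure-core-test's TestBase provides it).
private void sleepIfLive(Duration duration) {
    if (getTestMode() != TestMode.PLAYBACK) {
        ResourceManagerUtils.sleep(duration);
    }
}
```

The bare `ResourceManagerUtils.sleep(Duration.ofSeconds(10));` calls in the test would then become `sleepIfLive(Duration.ofSeconds(10));`.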
public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST2; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); }
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST2; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); }
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = 
computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? 
defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); 
Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); 
Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." + " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, 
PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = (PublicIpAddress) 
createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) 
.storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); 
Assertions.assertNotNull(nic); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST2; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20))
.withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = 
computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? 
defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); 
Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); 
Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." + " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, 
PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = (PublicIpAddress) 
createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) 
.storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); 
Assertions.assertNotNull(nic); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST2; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20))
.withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
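A note on the sync-poll pattern exercised by canCreateVirtualMachineSyncPoll in the class above: the manual loop can be factored into a small helper. This is only a sketch; pollUntilDone is a hypothetical name, while Accepted, PollResponse, LongRunningOperationStatus and ResourceManagerUtils.sleep all appear in the test itself.

```java
// Hedged sketch of the manual sync-poll loop from the test above; pollUntilDone is a
// hypothetical helper, not an SDK method. It sleeps for the service-suggested Retry-After
// (or a caller-supplied default), re-polls until the LRO completes, then fetches the result.
static <T> T pollUntilDone(Accepted<T> accepted, Duration defaultDelay) {
    LongRunningOperationStatus status = accepted.getActivationResponse().getStatus();
    Duration delay = accepted.getActivationResponse().getRetryAfter() == null
        ? defaultDelay
        : accepted.getActivationResponse().getRetryAfter();
    while (!status.isComplete()) {
        ResourceManagerUtils.sleep(delay);
        PollResponse<?> response = accepted.getSyncPoller().poll();
        status = response.getStatus();
        delay = response.getRetryAfter() == null ? defaultDelay : response.getRetryAfter();
    }
    return accepted.getFinalResult(); // blocks until the terminal state, returns the resource
}
```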
Why was this changed? That doesn't seem right.
public String[] getPropertyNames() { Set<String> keySet = properties.keySet(); return keySet.toArray(new String[0]); }
return keySet.toArray(new String[0]);
public String[] getPropertyNames() { Set<String> keySet = properties.keySet(); return keySet.toArray(new String[keySet.size()]); }
class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class); private final String context; private Map<String, Object> properties = new LinkedHashMap<>(); private final String label; private AppConfigurationProperties appConfigurationProperties; private static ObjectMapper mapper = new ObjectMapper(); private HashMap<String, KeyVaultClient> keyVaultClients; private ClientStore clients; private KeyVaultCredentialProvider keyVaultCredentialProvider; private SecretClientBuilderSetup keyVaultClientProvider; private AppConfigurationProviderProperties appProperties; private ConfigStore configStore; AppConfigurationPropertySource(String context, ConfigStore configStore, String label, AppConfigurationProperties appConfigurationProperties, ClientStore clients, AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider, SecretClientBuilderSetup keyVaultClientProvider) { super(context + configStore.getEndpoint() + "/" + label); this.context = context; this.configStore = configStore; this.label = label; this.appConfigurationProperties = appConfigurationProperties; this.appProperties = appProperties; this.keyVaultClients = new HashMap<String, KeyVaultClient>(); this.clients = clients; this.keyVaultCredentialProvider = keyVaultCredentialProvider; this.keyVaultClientProvider = keyVaultClientProvider; } @Override public Object getProperty(String name) { return properties.get(name); } /** * <p> * Gets settings from Azure/Cache to set as configurations. Updates the cache. * </p> * * <p> * <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call * {@code initFeatures} to update Feature Management, but make sure its done in the * last {@code AppConfigurationPropertySource} * </p> * * @param featureSet The set of Feature Management Flags from various config stores. * @throws IOException Thrown when processing key/value failed when reading feature * flags * @return Updated Feature Set from Property Source */ FeatureSet initProperties(FeatureSet featureSet) throws IOException { String storeName = configStore.getEndpoint(); Date date = new Date(); SettingSelector settingSelector = new SettingSelector().setLabelFilter(label); settingSelector.setKeyFilter(context + "*"); List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName); settingSelector.setKeyFilter(".appconfig*"); List<ConfigurationSetting> features = clients.listSettings(settingSelector, storeName); if (settings == null || features == null) { throw new IOException("Unable to load properties from App Configuration Store."); } for (ConfigurationSetting setting : settings) { String key = setting.getKey().trim().substring(context.length()).replace('/', '.'); if (setting.getContentType() != null && setting.getContentType().equals(KEY_VAULT_CONTENT_TYPE)) { String entry = getKeyVaultEntry(setting.getValue()); if (entry != null) { properties.put(key, entry); } } else { properties.put(key, setting.getValue()); } } return addToFeatureSet(featureSet, features, date); } /** * Given a Setting's Key Vault Reference stored in the Settings value, it will get its * entry in Key Vault.
* * @param value {"uri": * "&lt;your-vault-url&gt;/secret/&lt;secret&gt;/&lt;version&gt;"} * @return Key Vault Secret Value */ private String getKeyVaultEntry(String value) { String secretValue = null; try { URI uri = null; try { JsonNode kvReference = mapper.readTree(value); uri = new URI(kvReference.at("/uri").asText()); } catch (URISyntaxException e) { LOGGER.error("Error Processing Key Vault Entry URI."); ReflectionUtils.rethrowRuntimeException(e); } KeyVaultSecret secret = getKeyVaultClient(uri, uri.getHost()) .getSecret(uri, appProperties.getMaxRetryTime()); if (secret == null) { throw new IOException("No Key Vault Secret found for Reference."); } secretValue = secret.getValue(); } catch (RuntimeException | IOException e) { LOGGER.error("Error Retrieving Key Vault Entry"); ReflectionUtils.rethrowRuntimeException(e); } return secretValue; } KeyVaultClient getKeyVaultClient(URI uri, String uriHost) { return keyVaultClients.computeIfAbsent(uriHost, ignored -> new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider, keyVaultClientProvider)); } /** * Initializes Feature Management configurations. Only one * {@code AppConfigurationPropertySource} can call this, and it needs to be done after * the rest have run initProperties. * @param featureSet Feature Flag info to be set to this property source. */ void initFeatures(FeatureSet featureSet) { ObjectMapper featureMapper = new ObjectMapper(); featureMapper.setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE); properties.put(FEATURE_MANAGEMENT_KEY, featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class)); } /** * Adds items to a {@code FeatureSet} from a list of {@code KeyValueItem}. * * @param featureSet The parsed KeyValueItems will be added to this * @param settings New items read in from Azure * @param date Cache timestamp * @throws IOException */ private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date) throws IOException { for (ConfigurationSetting setting : settings) { Object feature = createFeature(setting); if (feature != null) { featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature); } } return featureSet; } /** * Creates a {@code Feature} from a {@code KeyValueItem} * * @param item Used to create Features before being converted to be set into * properties. * @return Feature created from KeyValueItem * @throws IOException */ private Object createFeature(ConfigurationSetting item) throws IOException { Feature feature = null; if (item.getContentType() != null && item.getContentType().equals(FEATURE_FLAG_CONTENT_TYPE)) { try { String key = item.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()); FeatureManagementItem featureItem = mapper.readValue(item.getValue(), FeatureManagementItem.class); feature = new Feature(key, featureItem); if (feature.getEnabledFor().size() == 0 && featureItem.getEnabled()) { return true; } else if (!featureItem.getEnabled()) { return false; } return feature; } catch (IOException e) { throw new IOException("Unable to parse Feature Management values from Azure.", e); } } else { String message = String.format("Found Feature Flag %s with invalid Content Type of %s", item.getKey(), item.getContentType()); throw new IOException(message); } } }
class AppConfigurationPropertySource extends EnumerablePropertySource<ConfigurationClient> { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationPropertySource.class); private final String context; private Map<String, Object> properties = new LinkedHashMap<>(); private final String label; private AppConfigurationProperties appConfigurationProperties; private static ObjectMapper mapper = new ObjectMapper(); private HashMap<String, KeyVaultClient> keyVaultClients; private ClientStore clients; private KeyVaultCredentialProvider keyVaultCredentialProvider; private SecretClientBuilderSetup keyVaultClientProvider; private AppConfigurationProviderProperties appProperties; private ConfigStore configStore; AppConfigurationPropertySource(String context, ConfigStore configStore, String label, AppConfigurationProperties appConfigurationProperties, ClientStore clients, AppConfigurationProviderProperties appProperties, KeyVaultCredentialProvider keyVaultCredentialProvider, SecretClientBuilderSetup keyVaultClientProvider) { super(context + configStore.getEndpoint() + "/" + label); this.context = context; this.configStore = configStore; this.label = label; this.appConfigurationProperties = appConfigurationProperties; this.appProperties = appProperties; this.keyVaultClients = new HashMap<String, KeyVaultClient>(); this.clients = clients; this.keyVaultCredentialProvider = keyVaultCredentialProvider; this.keyVaultClientProvider = keyVaultClientProvider; } @Override public Object getProperty(String name) { return properties.get(name); } /** * <p> * Gets settings from Azure/Cache to set as configurations. Updates the cache. * </p> * * <p> * <b>Note</b>: Doesn't update Feature Management, just stores values in cache. Call * {@code initFeatures} to update Feature Management, but make sure its done in the * last {@code AppConfigurationPropertySource} * </p> * * @param featureSet The set of Feature Management Flags from various config stores. * @throws IOException Thrown when processing key/value failed when reading feature * flags * @return Updated Feature Set from Property Source */ FeatureSet initProperties(FeatureSet featureSet) throws IOException { String storeName = configStore.getEndpoint(); Date date = new Date(); SettingSelector settingSelector = new SettingSelector().setLabelFilter(label); settingSelector.setKeyFilter(context + "*"); List<ConfigurationSetting> settings = clients.listSettings(settingSelector, storeName); settingSelector.setKeyFilter(".appconfig*"); List<ConfigurationSetting> features = clients.listSettings(settingSelector, storeName); if (settings == null || features == null) { throw new IOException("Unable to load properties from App Configuration Store."); } for (ConfigurationSetting setting : settings) { String key = setting.getKey().trim().substring(context.length()).replace('/', '.'); if (setting.getContentType() != null && setting.getContentType().equals(KEY_VAULT_CONTENT_TYPE)) { String entry = getKeyVaultEntry(setting.getValue()); if (entry != null) { properties.put(key, entry); } } else { properties.put(key, setting.getValue()); } } return addToFeatureSet(featureSet, features, date); } /** * Given a Setting's Key Vault Reference stored in the Settings value, it will get its * entry in Key Vault.
* * @param value {"uri": * "&lt;your-vault-url&gt;/secret/&lt;secret&gt;/&lt;version&gt;"} * @return Key Vault Secret Value */ private String getKeyVaultEntry(String value) { String secretValue = null; try { URI uri = null; try { JsonNode kvReference = mapper.readTree(value); uri = new URI(kvReference.at("/uri").asText()); } catch (URISyntaxException e) { LOGGER.error("Error Processing Key Vault Entry URI."); ReflectionUtils.rethrowRuntimeException(e); } KeyVaultSecret secret = getKeyVaultClient(uri, uri.getHost()) .getSecret(uri, appProperties.getMaxRetryTime()); if (secret == null) { throw new IOException("No Key Vault Secret found for Reference."); } secretValue = secret.getValue(); } catch (RuntimeException | IOException e) { LOGGER.error("Error Retrieving Key Vault Entry"); ReflectionUtils.rethrowRuntimeException(e); } return secretValue; } KeyVaultClient getKeyVaultClient(URI uri, String uriHost) { return keyVaultClients.computeIfAbsent(uriHost, ignored -> new KeyVaultClient(appConfigurationProperties, uri, keyVaultCredentialProvider, keyVaultClientProvider)); } /** * Initializes Feature Management configurations. Only one * {@code AppConfigurationPropertySource} can call this, and it needs to be done after * the rest have run initProperties. * @param featureSet Feature Flag info to be set to this property source. */ void initFeatures(FeatureSet featureSet) { ObjectMapper featureMapper = new ObjectMapper(); featureMapper.setPropertyNamingStrategy(PropertyNamingStrategy.KEBAB_CASE); properties.put(FEATURE_MANAGEMENT_KEY, featureMapper.convertValue(featureSet.getFeatureManagement(), LinkedHashMap.class)); } /** * Adds items to a {@code FeatureSet} from a list of {@code KeyValueItem}. * * @param featureSet The parsed KeyValueItems will be added to this * @param settings New items read in from Azure * @param date Cache timestamp * @throws IOException */ private FeatureSet addToFeatureSet(FeatureSet featureSet, List<ConfigurationSetting> settings, Date date) throws IOException { for (ConfigurationSetting setting : settings) { Object feature = createFeature(setting); if (feature != null) { featureSet.addFeature(setting.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()), feature); } } return featureSet; } /** * Creates a {@code Feature} from a {@code KeyValueItem} * * @param item Used to create Features before being converted to be set into * properties. * @return Feature created from KeyValueItem * @throws IOException */ private Object createFeature(ConfigurationSetting item) throws IOException { Feature feature = null; if (item.getContentType() != null && item.getContentType().equals(FEATURE_FLAG_CONTENT_TYPE)) { try { String key = item.getKey().trim().substring(FEATURE_FLAG_PREFIX.length()); FeatureManagementItem featureItem = mapper.readValue(item.getValue(), FeatureManagementItem.class); feature = new Feature(key, featureItem); if (feature.getEnabledFor().size() == 0 && featureItem.getEnabled()) { return true; } else if (!featureItem.getEnabled()) { return false; } return feature; } catch (IOException e) { throw new IOException("Unable to parse Feature Management values from Azure.", e); } } else { String message = String.format("Found Feature Flag %s with invalid Content Type of %s", item.getKey(), item.getContentType()); throw new IOException(message); } } }
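On the toArray change questioned above: the record shows toArray(new String[0]) being replaced with the pre-sized toArray(new String[keySet.size()]). The two forms are behaviorally identical, and the zero-length-array idiom is widely recommended on modern JVMs (the runtime allocates a right-sized array internally, and the pattern is well optimized), which is presumably what the reviewer is objecting to. A minimal side-by-side sketch:

```java
import java.util.LinkedHashSet;
import java.util.Set;

class ToArrayIdioms {
    public static void main(String[] args) {
        Set<String> keySet = new LinkedHashSet<>(Set.of("a", "b", "c"));
        // Zero-length form (the code before the change): toArray allocates a right-sized array.
        String[] zeroLength = keySet.toArray(new String[0]);
        // Pre-sized form (the code after the change): the caller allocates the target array.
        String[] presized = keySet.toArray(new String[keySet.size()]);
        System.out.println(zeroLength.length + " " + presized.length); // 3 3 -- same contents
    }
}
```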
maybe "Transitive closures from azure-core will be ignored."
public TransformationResult tryTransform(@Nullable E oldElement, @Nullable E newElement, Difference difference) { String newArchive = difference.attachments.get("newArchive"); String newArchiveRole = difference.attachments.get("newArchiveRole"); if (newArchive == null) { return TransformationResult.keep(); } if (!SUPPLEMENTARY.equalsIgnoreCase(newArchiveRole)) { return TransformationResult.keep(); } if (!CORE_ARCHIVE.matcher(newArchive).matches()) { return TransformationResult.keep(); } if (difference.criticality == Criticality.ERROR) { return TransformationResult.replaceWith(Difference.copy(difference) .withCriticality(Criticality.DOCUMENTED) .withJustification("Transitive changes from Core libraries should be ignored.") .build()); } else { return TransformationResult.keep(); } }
.withJustification("Transitive changes from Core libraries should be ignored.")
public TransformationResult tryTransform(@Nullable E oldElement, @Nullable E newElement, Difference difference) { String newArchive = difference.attachments.get("newArchive"); String newArchiveRole = difference.attachments.get("newArchiveRole"); if (newArchive == null) { return TransformationResult.keep(); } if (!SUPPLEMENTARY.equalsIgnoreCase(newArchiveRole)) { return TransformationResult.keep(); } if (!CORE_ARCHIVE.matcher(newArchive).matches()) { return TransformationResult.keep(); } if (difference.criticality == Criticality.ERROR) { return TransformationResult.discard(); } else { return TransformationResult.keep(); } }
class TransitiveCoreChangesTransform<E extends Element<E>> extends BaseDifferenceTransform<E> { private static final Pattern CORE_ARCHIVE = Pattern.compile("com\\.azure:azure-core:.*"); private static final String SUPPLEMENTARY = Archive.Role.SUPPLEMENTARY.toString(); @Override public Pattern[] getDifferenceCodePatterns() { return new Pattern[] { Pattern.compile(".*") }; } @Override public String getExtensionId() { return "transitive-core-changes"; } @Override }
class TransitiveCoreChangesTransform<E extends Element<E>> extends BaseDifferenceTransform<E> { private static final Pattern CORE_ARCHIVE = Pattern.compile("com\\.azure:azure-core:.*"); private static final String SUPPLEMENTARY = Archive.Role.SUPPLEMENTARY.toString(); @Override public Pattern[] getDifferenceCodePatterns() { return new Pattern[] { Pattern.compile(".*") }; } @Override public String getExtensionId() { return "transitive-core-changes"; } @Override }
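The transform above keeps every difference except ERROR-level ones reported against a supplementary com.azure:azure-core archive. A compact, runnable restatement of that gate; the archive coordinates below are illustrative:

import java.util.regex.Pattern;

public class TransformGateDemo {
    private static final Pattern CORE_ARCHIVE = Pattern.compile("com\\.azure:azure-core:.*");

    // Mirrors the early-return chain in tryTransform: discard only when the difference
    // comes from a supplementary azure-core archive and is classified as an ERROR.
    static boolean shouldDiscard(String newArchive, String newArchiveRole, boolean isError) {
        return newArchive != null
            && "SUPPLEMENTARY".equalsIgnoreCase(newArchiveRole)
            && CORE_ARCHIVE.matcher(newArchive).matches()
            && isError;
    }

    public static void main(String[] args) {
        System.out.println(shouldDiscard("com.azure:azure-core:1.45.0", "supplementary", true));       // true
        System.out.println(shouldDiscard("com.azure:azure-storage-blob:12.0.0", "supplementary", true)); // false
        System.out.println(shouldDiscard("com.azure:azure-core:1.45.0", "primary", true));              // false
    }
}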
Is resetWriterIndex necessary here?
public static byte[] encode(final UUID uuid) { final byte[] bytes = new byte[2 * Long.BYTES]; encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex()); return bytes; }
encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex());
public static byte[] encode(final UUID uuid) { final byte[] bytes = new byte[2 * Long.BYTES]; encode(uuid, Unpooled.wrappedBuffer(bytes).resetWriterIndex()); return bytes; }
class RntbdUUID { public static final UUID EMPTY = new UUID(0L, 0L); private RntbdUUID() { } /** * Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} * * @param bytes a {@link byte} array containing the serialized {@link UUID} to be decoded * @return a new {@link UUID} */ public static UUID decode(final byte[] bytes) { return decode(Unpooled.wrappedBuffer(bytes)); } /** * Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} * * @param in a {@link ByteBuf} containing the serialized {@link UUID} to be decoded * @return a new {@link UUID} */ public static UUID decode(final ByteBuf in) { checkNotNull(in, "in"); if (in.readableBytes() < 2 * Long.BYTES) { final String reason = Strings.lenientFormat("invalid frame length: %s", in.readableBytes()); throw new CorruptedFrameException(reason); } long mostSignificantBits = in.readUnsignedIntLE() << 32; mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()) << 16; mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()); long leastSignificantBits = (0x000000000000FFFFL & in.readShort()) << (32 + 16); for (int shift = 32 + 8; shift >= 0; shift -= 8) { leastSignificantBits |= (0x00000000000000FFL & in.readByte()) << shift; } return new UUID(mostSignificantBits, leastSignificantBits); } /** * Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} * * @param uuid a {@link UUID} to be encoded * @return a new byte array containing the encoded */ /** * Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} * * @param uuid a {@link UUID} to be encoded * @param out an output {@link ByteBuf} */ public static void encode(final UUID uuid, final ByteBuf out) { final long mostSignificantBits = uuid.getMostSignificantBits(); out.writeIntLE((int)((mostSignificantBits & 0xFFFFFFFF00000000L) >>> 32)); out.writeShortLE((short)((mostSignificantBits & 0x00000000FFFF0000L) >>> 16)); out.writeShortLE((short)(mostSignificantBits & 0x000000000000FFFFL)); final long leastSignificantBits = uuid.getLeastSignificantBits(); out.writeShort((short)((leastSignificantBits & 0xFFFF000000000000L) >>> (32 + 16))); out.writeShort((short)((leastSignificantBits & 0x0000FFFF00000000L) >>> 32)); out.writeInt((int)(leastSignificantBits & 0x00000000FFFFFFFFL)); } }
class RntbdUUID { public static final UUID EMPTY = new UUID(0L, 0L); private RntbdUUID() { } /** * Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} * * @param bytes a {@link byte} array containing the serialized {@link UUID} to be decoded * @return a new {@link UUID} */ public static UUID decode(final byte[] bytes) { return decode(Unpooled.wrappedBuffer(bytes)); } /** * Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} * * @param in a {@link ByteBuf} containing the serialized {@link UUID} to be decoded * @return a new {@link UUID} */ public static UUID decode(final ByteBuf in) { checkNotNull(in, "in"); if (in.readableBytes() < 2 * Long.BYTES) { final String reason = Strings.lenientFormat("invalid frame length: %s", in.readableBytes()); throw new CorruptedFrameException(reason); } long mostSignificantBits = in.readUnsignedIntLE() << 32; mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()) << 16; mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()); long leastSignificantBits = (0x000000000000FFFFL & in.readShort()) << (32 + 16); for (int shift = 32 + 8; shift >= 0; shift -= 8) { leastSignificantBits |= (0x00000000000000FFL & in.readByte()) << shift; } return new UUID(mostSignificantBits, leastSignificantBits); } /** * Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} * * @param uuid a {@link UUID} to be encoded * @return a new byte array containing the encoded */ /** * Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} * * @param uuid a {@link UUID} to be encoded * @param out an output {@link ByteBuf} */ public static void encode(final UUID uuid, final ByteBuf out) { final long mostSignificantBits = uuid.getMostSignificantBits(); out.writeIntLE((int)((mostSignificantBits & 0xFFFFFFFF00000000L) >>> 32)); out.writeShortLE((short)((mostSignificantBits & 0x00000000FFFF0000L) >>> 16)); out.writeShortLE((short)(mostSignificantBits & 0x000000000000FFFFL)); final long leastSignificantBits = uuid.getLeastSignificantBits(); out.writeShort((short)((leastSignificantBits & 0xFFFF000000000000L) >>> (32 + 16))); out.writeShort((short)((leastSignificantBits & 0x0000FFFF00000000L) >>> 32)); out.writeInt((int)(leastSignificantBits & 0x00000000FFFFFFFFL)); } }
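On the resetWriterIndex question: Unpooled.wrappedBuffer(byte[]) returns a buffer whose writerIndex already equals the array length, so without the reset the writeIntLE/writeShortLE calls in encode(UUID, ByteBuf) would try to append past capacity and throw IndexOutOfBoundsException. Resetting the writer index to 0 (the default marked writer index) makes encode overwrite the array in place. A runnable sketch with Netty:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ResetWriterIndexDemo {
    public static void main(String[] args) {
        byte[] bytes = new byte[16];
        ByteBuf buf = Unpooled.wrappedBuffer(bytes);
        System.out.println(buf.writerIndex());   // 16: the wrapped buffer looks "full"
        System.out.println(buf.writableBytes()); // 0: no room left to write
        buf.resetWriterIndex();                  // marked writer index defaults to 0
        System.out.println(buf.writerIndex());   // 0: writes now start at the front
        buf.writeLongLE(0x1122334455667788L);    // overwrites bytes[0..7] in place
        System.out.println(Integer.toHexString(bytes[0] & 0xFF)); // 88 (little-endian)
    }
}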
Are we allowed to have no hostname in a listener? Ideally, if we want to clear a hostname, the code should provide a `withoutHostname` method in the update flow.
public ApplicationGatewayListenerImpl withHostname(String hostname) { this.innerModel().withHostname(null); if (hostname == null) { this.innerModel().withHostNames(null); } else { List<String> hostNames = new ArrayList<>(); hostNames.add(hostname); this.innerModel().withHostNames(hostNames); } return this; }
this.innerModel().withHostNames(null);
public ApplicationGatewayListenerImpl withHostname(String hostname) { if (hostname != null) { this.innerModel().withHostname(null); List<String> hostNames = new ArrayList<>(); hostNames.add(hostname); this.innerModel().withHostNames(hostNames); } return this; }
class ApplicationGatewayListenerImpl extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway> implements ApplicationGatewayListener, ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>, ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>, ApplicationGatewayListener.Update { ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) { super(inner, parent); } @Override public String networkId() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.networkId(); } else { return null; } } @Override public String subnetName() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.subnetName(); } else { return null; } } @Override public boolean requiresServerNameIndication() { if (this.innerModel().requireServerNameIndication() != null) { return this.innerModel().requireServerNameIndication(); } else { return false; } } @Override public String hostname() { if (this.innerModel().hostname() != null) { return this.innerModel().hostname(); } if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return this.innerModel().hostNames().get(0); } return null; } @Override public String publicIpAddressId() { final ApplicationGatewayFrontend frontend = this.frontend(); if (frontend == null) { return null; } else { return frontend.publicIpAddressId(); } } @Override public PublicIpAddress getPublicIpAddress() { return this.getPublicIpAddressAsync().block(); } @Override public Mono<PublicIpAddress> getPublicIpAddressAsync() { String pipId = this.publicIpAddressId(); return pipId == null ? Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId); } @Override public String name() { return this.innerModel().name(); } @Override public ApplicationGatewaySslCertificate sslCertificate() { SubResource certRef = this.innerModel().sslCertificate(); if (certRef == null) { return null; } String name = ResourceUtils.nameFromResourceId(certRef.id()); return this.parent().sslCertificates().get(name); } @Override public ApplicationGatewayProtocol protocol() { return this.innerModel().protocol(); } @Override public int frontendPortNumber() { String name = this.frontendPortName(); if (name == null) { return 0; } else if (!this.parent().frontendPorts().containsKey(name)) { return 0; } else { return this.parent().frontendPorts().get(name); } } @Override public String frontendPortName() { if (this.innerModel().frontendPort() != null) { return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id()); } else { return null; } } @Override public ApplicationGatewayFrontend frontend() { final SubResource frontendInner = this.innerModel().frontendIpConfiguration(); if (frontendInner == null) { return null; } else { final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id()); return this.parent().frontends().get(frontendName); } } @Override public ApplicationGatewayImpl attach() { this.parent().withHttpListener(this); return this.parent(); } private ApplicationGatewayListenerImpl withFrontend(String name) { SubResource frontendRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name); this.innerModel().withFrontendIpConfiguration(frontendRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(String name) { SubResource portRef = new 
SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name); this.innerModel().withFrontendPort(portRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) { String portName = this.parent().frontendPortNameFromNumber(portNumber); if (portName == null) { portName = this.parent().manager().resourceManager().internalContext() .randomResourceName("port", 9); this.parent().withFrontendPort(portNumber, portName); } return this.withFrontendPort(portName); } @Override public ApplicationGatewayListenerImpl withSslCertificate(String name) { SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name); this.innerModel().withSslCertificate(certRef); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) { return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null); } private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId( String keyVaultSecretId, String name) { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach(); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException { return withSslCertificateFromPfxFile(pfxFile, null); } private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach(); return this.withSslCertificate(name); } @Override public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) { ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate(); if (sslCert != null) { sslCert.withPfxPassword(password); } return this; } @Override public ApplicationGatewayListenerImpl withHttp() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP); return this; } @Override public ApplicationGatewayListenerImpl withHttps() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS); return this; } @Override @Override public ApplicationGatewayListenerImpl withServerNameIndication() { this.innerModel().withRequireServerNameIndication(true); return this; } @Override public ApplicationGatewayListenerImpl withoutServerNameIndication() { this.innerModel().withRequireServerNameIndication(false); return this; } @Override public ApplicationGatewayListenerImpl withPrivateFrontend() { this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name()); return this; } @Override public ApplicationGatewayListenerImpl withPublicFrontend() { this.withFrontend(this.parent().ensureDefaultPublicFrontend().name()); return this; } }
class ApplicationGatewayListenerImpl extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway> implements ApplicationGatewayListener, ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>, ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>, ApplicationGatewayListener.Update { ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) { super(inner, parent); } @Override public String networkId() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.networkId(); } else { return null; } } @Override public String subnetName() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.subnetName(); } else { return null; } } @Override public boolean requiresServerNameIndication() { if (this.innerModel().requireServerNameIndication() != null) { return this.innerModel().requireServerNameIndication(); } else { return false; } } @Override public String hostname() { if (this.innerModel().hostname() != null) { return this.innerModel().hostname(); } if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return this.innerModel().hostNames().get(0); } return null; } @Override public List<String> hostnames() { if (this.innerModel().hostname() != null) { return Collections.singletonList(this.innerModel().hostname()); } if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return Collections.emptyList(); } return Collections.unmodifiableList(this.innerModel().hostNames()); } @Override public String publicIpAddressId() { final ApplicationGatewayFrontend frontend = this.frontend(); if (frontend == null) { return null; } else { return frontend.publicIpAddressId(); } } @Override public PublicIpAddress getPublicIpAddress() { return this.getPublicIpAddressAsync().block(); } @Override public Mono<PublicIpAddress> getPublicIpAddressAsync() { String pipId = this.publicIpAddressId(); return pipId == null ? 
Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId); } @Override public String name() { return this.innerModel().name(); } @Override public ApplicationGatewaySslCertificate sslCertificate() { SubResource certRef = this.innerModel().sslCertificate(); if (certRef == null) { return null; } String name = ResourceUtils.nameFromResourceId(certRef.id()); return this.parent().sslCertificates().get(name); } @Override public ApplicationGatewayProtocol protocol() { return this.innerModel().protocol(); } @Override public int frontendPortNumber() { String name = this.frontendPortName(); if (name == null) { return 0; } else if (!this.parent().frontendPorts().containsKey(name)) { return 0; } else { return this.parent().frontendPorts().get(name); } } @Override public String frontendPortName() { if (this.innerModel().frontendPort() != null) { return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id()); } else { return null; } } @Override public ApplicationGatewayFrontend frontend() { final SubResource frontendInner = this.innerModel().frontendIpConfiguration(); if (frontendInner == null) { return null; } else { final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id()); return this.parent().frontends().get(frontendName); } } @Override public ApplicationGatewayImpl attach() { this.parent().withHttpListener(this); return this.parent(); } private ApplicationGatewayListenerImpl withFrontend(String name) { SubResource frontendRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name); this.innerModel().withFrontendIpConfiguration(frontendRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(String name) { SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name); this.innerModel().withFrontendPort(portRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) { String portName = this.parent().frontendPortNameFromNumber(portNumber); if (portName == null) { portName = this.parent().manager().resourceManager().internalContext() .randomResourceName("port", 9); this.parent().withFrontendPort(portNumber, portName); } return this.withFrontendPort(portName); } @Override public ApplicationGatewayListenerImpl withSslCertificate(String name) { SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name); this.innerModel().withSslCertificate(certRef); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) { return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null); } private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId( String keyVaultSecretId, String name) { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach(); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException { return withSslCertificateFromPfxFile(pfxFile, null); } private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } 
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach(); return this.withSslCertificate(name); } @Override public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) { ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate(); if (sslCert != null) { sslCert.withPfxPassword(password); } return this; } @Override public ApplicationGatewayListenerImpl withHttp() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP); return this; } @Override public ApplicationGatewayListenerImpl withHttps() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS); return this; } @Override @Override public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) { if (!CoreUtils.isNullOrEmpty(hostnames)) { this.innerModel().withHostname(null); this.innerModel().withHostNames(hostnames); } return this; } @Override public ApplicationGatewayListenerImpl withServerNameIndication() { this.innerModel().withRequireServerNameIndication(true); return this; } @Override public ApplicationGatewayListenerImpl withoutServerNameIndication() { this.innerModel().withRequireServerNameIndication(false); return this; } @Override public ApplicationGatewayListenerImpl withPrivateFrontend() { this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name()); return this; } @Override public ApplicationGatewayListenerImpl withPublicFrontend() { this.withFrontend(this.parent().ensureDefaultPublicFrontend().name()); return this; } }
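Per the review comment above, a dedicated clearing method would make the update flow explicit instead of overloading withHostname(null). A hypothetical sketch of such a method inside ApplicationGatewayListenerImpl; the name withoutHostname and its presence in the fluent interface are assumptions drawn from the comment, not existing SDK API:

// Hypothetical sketch only: ApplicationGatewayListener does not currently expose
// withoutHostname; the name and semantics are an assumption, not existing API.
public ApplicationGatewayListenerImpl withoutHostname() {
    this.innerModel().withHostname(null);  // clear the single-hostname field
    this.innerModel().withHostNames(null); // clear the multi-hostname field as well
    return this;
}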
Do we need this test?
public void canSpecifyWildcardListeners() { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); String listenerName = "listener1"; String hostname1 = "my.contoso.com"; ApplicationGateway gateway = networkManager.applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule80") .fromPublicFrontend() .fromFrontendHttpPort(80) .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .withCookieBasedAffinity() .attach() .defineListener(listenerName) .withPublicFrontend() .withFrontendPort(9000) .withHttp() .withHostname(hostname1) .attach() .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withExistingPublicIpAddress(pip) .create(); Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname()); String hostname2 = "*.contoso.com"; gateway.update() .updateListener(listenerName) .withHostname(hostname2) .parent() .apply(); Assertions.assertEquals(hostname2, gateway.listeners().get(listenerName).hostname()); gateway.innerModel().httpListeners().iterator().next().withHostNames(null).withHostname(hostname1); gateway.update() .apply(); Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname()); }
Assertions.assertEquals(hostname1, gateway.listeners().get(listenerName).hostname());
public void canSpecifyWildcardListeners() { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); String listener1 = "listener1"; String hostname1 = "my.contoso.com"; ApplicationGateway gateway = networkManager.applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule80") .fromPublicFrontend() .fromFrontendHttpPort(80) .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .withCookieBasedAffinity() .attach() .defineListener(listener1) .withPublicFrontend() .withFrontendPort(9000) .withHttp() .withHostname(hostname1) .attach() .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withExistingPublicIpAddress(pip) .create(); Assertions.assertEquals(hostname1, gateway.listeners().get(listener1).hostname()); String hostname2 = "*.contoso.com"; gateway.update() .updateListener(listener1) .withHostname(hostname2) .parent() .apply(); Assertions.assertEquals(hostname2, gateway.listeners().get(listener1).hostname()); List<String> hostnames = new ArrayList<>(); hostnames.add(hostname1); hostnames.add(hostname2); gateway.update() .updateListener(listener1) .withHostnames(hostnames) .parent() .apply(); Assertions.assertEquals(hostnames, gateway.listeners().get(listener1).hostnames()); }
class ApplicationGatewayTests extends NetworkManagementTest { @Test public void canCRUDApplicationGatewayWithWAF() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpPort(80) .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertTrue(appGateway != null); Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier())); Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size())); Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2); Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5); ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration(); config.withFileUploadLimitInMb(200); config .withDisabledRuleGroups( Arrays .asList( new ApplicationGatewayFirewallDisabledRuleGroup() .withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION"))); config.withRequestBodyCheck(true); config.withMaxRequestBodySizeInKb(64); config .withExclusions( Arrays .asList( new ApplicationGatewayFirewallExclusion() .withMatchVariable("RequestHeaderNames") .withSelectorMatchOperator("StartsWith") .withSelector("User-Agent"))); appGateway.update().withWebApplicationFirewall(config).apply(); appGateway.refresh(); Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200); Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck()); Assertions .assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64); Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(), "RequestHeaderNames"); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(), "StartsWith"); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent"); Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(), "REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION"); } @Test @Test @Disabled("Need client id for key vault usage") public void canCreateApplicationGatewayWithSecret() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); String identityName = generateRandomResourceName("id", 10); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) 
.withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); Identity identity = msiManager .identities() .define(identityName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .create(); Assertions.assertNotNull(identity.name()); Assertions.assertNotNull(identity.principalId()); Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId()); Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId()); ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpsPort(443) .withSslCertificate("ssl1") .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withIdentity(serviceIdentity) .defineSslCertificate("ssl1") .withKeyVaultSecretId(secret1.id()) .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId()); Assertions .assertEquals( secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId()); appGateway = appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply(); Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId()); } @Test @DoNotRecord(skipInPlayback = true) public void canCreateApplicationGatewayWithSslCertificate() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); String identityName = generateRandomResourceName("id", 10); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); Identity identity = msiManager .identities() .define(identityName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .create(); Assertions.assertNotNull(identity.name()); Assertions.assertNotNull(identity.principalId()); ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity); String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId()); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpsPort(443) .withSslCertificate("ssl1") .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withIdentity(serviceIdentity) .defineSslCertificate("ssl1") .withKeyVaultSecretId(secretId) .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId()); Assertions.assertEquals(secretId, 
appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId()); } private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) { String vaultName = generateRandomResourceName("vlt", 10); String secretName = generateRandomResourceName("srt", 10); Vault vault = keyVaultManager .vaults() .define(vaultName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipal) .allowSecretAllPermissions() .allowCertificateAllPermissions() .attach() .defineAccessPolicy() .forObjectId(identityPrincipal) .allowSecretAllPermissions() .attach() .withAccessFromAzureServices() .withDeploymentEnabled() .create(); CertificateClient certificateClient = new CertificateClientBuilder() .vaultUrl(vault.vaultUri()) .pipeline(vault.vaultHttpPipeline()) .buildClient(); KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult(); return certificate.getSecretId(); } private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception { String vaultName = generateRandomResourceName("vlt", 10); String secretName = generateRandomResourceName("srt", 10); BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader() .getResource("test.certificate").getFile()))); String secretValue = buff.readLine(); Vault vault = keyVaultManager .vaults() .define(vaultName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipal) .allowSecretAllPermissions() .attach() .defineAccessPolicy() .forObjectId(identityPrincipal) .allowSecretAllPermissions() .attach() .withAccessFromAzureServices() .withDeploymentEnabled() .create(); return vault.secrets().define(secretName).withValue(secretValue).create(); } private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception { ObjectMapper mapper = new ObjectMapper(); JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode(); ((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId()); ((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId()); ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue = new JacksonAdapter() .deserialize( mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject), ManagedServiceIdentityUserAssignedIdentities.class, SerializerEncoding.JSON); Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>(); userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue); ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity(); serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED); serviceIdentity.withUserAssignedIdentities(userAssignedIdentities); return serviceIdentity; } }
class ApplicationGatewayTests extends NetworkManagementTest { @Test public void canCRUDApplicationGatewayWithWAF() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpPort(80) .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertTrue(appGateway != null); Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier())); Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size())); Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2); Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5); ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration(); config.withFileUploadLimitInMb(200); config .withDisabledRuleGroups( Arrays .asList( new ApplicationGatewayFirewallDisabledRuleGroup() .withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION"))); config.withRequestBodyCheck(true); config.withMaxRequestBodySizeInKb(64); config .withExclusions( Arrays .asList( new ApplicationGatewayFirewallExclusion() .withMatchVariable("RequestHeaderNames") .withSelectorMatchOperator("StartsWith") .withSelector("User-Agent"))); appGateway.update().withWebApplicationFirewall(config).apply(); appGateway.refresh(); Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200); Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck()); Assertions .assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64); Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(), "RequestHeaderNames"); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(), "StartsWith"); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent"); Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(), "REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION"); } @Test @Test @Disabled("Need client id for key vault usage") public void canCreateApplicationGatewayWithSecret() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); String identityName = generateRandomResourceName("id", 10); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) 
.withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); Identity identity = msiManager .identities() .define(identityName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .create(); Assertions.assertNotNull(identity.name()); Assertions.assertNotNull(identity.principalId()); Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId()); Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId()); ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpsPort(443) .withSslCertificate("ssl1") .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withIdentity(serviceIdentity) .defineSslCertificate("ssl1") .withKeyVaultSecretId(secret1.id()) .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId()); Assertions .assertEquals( secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId()); appGateway = appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply(); Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId()); } @Test @DoNotRecord(skipInPlayback = true) public void canCreateApplicationGatewayWithSslCertificate() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); String identityName = generateRandomResourceName("id", 10); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); Identity identity = msiManager .identities() .define(identityName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .create(); Assertions.assertNotNull(identity.name()); Assertions.assertNotNull(identity.principalId()); ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity); String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId()); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpsPort(443) .withSslCertificate("ssl1") .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withIdentity(serviceIdentity) .defineSslCertificate("ssl1") .withKeyVaultSecretId(secretId) .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId()); Assertions.assertEquals(secretId, 
appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId()); } private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) { String vaultName = generateRandomResourceName("vlt", 10); String secretName = generateRandomResourceName("srt", 10); Vault vault = keyVaultManager .vaults() .define(vaultName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipal) .allowSecretAllPermissions() .allowCertificateAllPermissions() .attach() .defineAccessPolicy() .forObjectId(identityPrincipal) .allowSecretAllPermissions() .attach() .withAccessFromAzureServices() .withDeploymentEnabled() .create(); CertificateClient certificateClient = new CertificateClientBuilder() .vaultUrl(vault.vaultUri()) .pipeline(vault.vaultHttpPipeline()) .buildClient(); KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult(); return certificate.getSecretId(); } private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception { String vaultName = generateRandomResourceName("vlt", 10); String secretName = generateRandomResourceName("srt", 10); BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader() .getResource("test.certificate").getFile()))); String secretValue = buff.readLine(); Vault vault = keyVaultManager .vaults() .define(vaultName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipal) .allowSecretAllPermissions() .attach() .defineAccessPolicy() .forObjectId(identityPrincipal) .allowSecretAllPermissions() .attach() .withAccessFromAzureServices() .withDeploymentEnabled() .create(); return vault.secrets().define(secretName).withValue(secretValue).create(); } private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception { ObjectMapper mapper = new ObjectMapper(); JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode(); ((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId()); ((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId()); ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue = new JacksonAdapter() .deserialize( mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject), ManagedServiceIdentityUserAssignedIdentities.class, SerializerEncoding.JSON); Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>(); userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue); ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity(); serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED); serviceIdentity.withUserAssignedIdentities(userAssignedIdentities); return serviceIdentity; } }
Move this line into `if (hostname != null) {`?
public ApplicationGatewayListenerImpl withHostname(String hostname) { this.innerModel().withHostname(null); if (hostname != null) { List<String> hostNames = new ArrayList<>(); hostNames.add(hostname); this.innerModel().withHostNames(hostNames); } return this; }
this.innerModel().withHostname(null);
public ApplicationGatewayListenerImpl withHostname(String hostname) { if (hostname != null) { this.innerModel().withHostname(null); List<String> hostNames = new ArrayList<>(); hostNames.add(hostname); this.innerModel().withHostNames(hostNames); } return this; }
class ApplicationGatewayListenerImpl extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway> implements ApplicationGatewayListener, ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>, ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>, ApplicationGatewayListener.Update { ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) { super(inner, parent); } @Override public String networkId() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.networkId(); } else { return null; } } @Override public String subnetName() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.subnetName(); } else { return null; } } @Override public boolean requiresServerNameIndication() { if (this.innerModel().requireServerNameIndication() != null) { return this.innerModel().requireServerNameIndication(); } else { return false; } } @Override public String hostname() { if (this.innerModel().hostname() != null) { return this.innerModel().hostname(); } if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return this.innerModel().hostNames().get(0); } return null; } @Override public List<String> hostnames() { if (this.innerModel().hostname() != null) { return Collections.singletonList(this.innerModel().hostname()); } if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return Collections.emptyList(); } return Collections.unmodifiableList(this.innerModel().hostNames()); } @Override public String publicIpAddressId() { final ApplicationGatewayFrontend frontend = this.frontend(); if (frontend == null) { return null; } else { return frontend.publicIpAddressId(); } } @Override public PublicIpAddress getPublicIpAddress() { return this.getPublicIpAddressAsync().block(); } @Override public Mono<PublicIpAddress> getPublicIpAddressAsync() { String pipId = this.publicIpAddressId(); return pipId == null ? 
Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId); } @Override public String name() { return this.innerModel().name(); } @Override public ApplicationGatewaySslCertificate sslCertificate() { SubResource certRef = this.innerModel().sslCertificate(); if (certRef == null) { return null; } String name = ResourceUtils.nameFromResourceId(certRef.id()); return this.parent().sslCertificates().get(name); } @Override public ApplicationGatewayProtocol protocol() { return this.innerModel().protocol(); } @Override public int frontendPortNumber() { String name = this.frontendPortName(); if (name == null) { return 0; } else if (!this.parent().frontendPorts().containsKey(name)) { return 0; } else { return this.parent().frontendPorts().get(name); } } @Override public String frontendPortName() { if (this.innerModel().frontendPort() != null) { return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id()); } else { return null; } } @Override public ApplicationGatewayFrontend frontend() { final SubResource frontendInner = this.innerModel().frontendIpConfiguration(); if (frontendInner == null) { return null; } else { final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id()); return this.parent().frontends().get(frontendName); } } @Override public ApplicationGatewayImpl attach() { this.parent().withHttpListener(this); return this.parent(); } private ApplicationGatewayListenerImpl withFrontend(String name) { SubResource frontendRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name); this.innerModel().withFrontendIpConfiguration(frontendRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(String name) { SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name); this.innerModel().withFrontendPort(portRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) { String portName = this.parent().frontendPortNameFromNumber(portNumber); if (portName == null) { portName = this.parent().manager().resourceManager().internalContext() .randomResourceName("port", 9); this.parent().withFrontendPort(portNumber, portName); } return this.withFrontendPort(portName); } @Override public ApplicationGatewayListenerImpl withSslCertificate(String name) { SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name); this.innerModel().withSslCertificate(certRef); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) { return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null); } private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId( String keyVaultSecretId, String name) { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach(); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException { return withSslCertificateFromPfxFile(pfxFile, null); } private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } 
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach(); return this.withSslCertificate(name); } @Override public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) { ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate(); if (sslCert != null) { sslCert.withPfxPassword(password); } return this; } @Override public ApplicationGatewayListenerImpl withHttp() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP); return this; } @Override public ApplicationGatewayListenerImpl withHttps() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS); return this; } @Override @Override public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) { this.innerModel().withHostname(null); if (!CoreUtils.isNullOrEmpty(hostnames)) { this.innerModel().withHostNames(hostnames); } return this; } @Override public ApplicationGatewayListenerImpl withServerNameIndication() { this.innerModel().withRequireServerNameIndication(true); return this; } @Override public ApplicationGatewayListenerImpl withoutServerNameIndication() { this.innerModel().withRequireServerNameIndication(false); return this; } @Override public ApplicationGatewayListenerImpl withPrivateFrontend() { this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name()); return this; } @Override public ApplicationGatewayListenerImpl withPublicFrontend() { this.withFrontend(this.parent().ensureDefaultPublicFrontend().name()); return this; } }
class ApplicationGatewayListenerImpl extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway> implements ApplicationGatewayListener, ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>, ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>, ApplicationGatewayListener.Update { ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) { super(inner, parent); } @Override public String networkId() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.networkId(); } else { return null; } } @Override public String subnetName() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.subnetName(); } else { return null; } } @Override public boolean requiresServerNameIndication() { if (this.innerModel().requireServerNameIndication() != null) { return this.innerModel().requireServerNameIndication(); } else { return false; } } @Override public String hostname() { if (this.innerModel().hostname() != null) { return this.innerModel().hostname(); } if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return this.innerModel().hostNames().get(0); } return null; } @Override public List<String> hostnames() { if (this.innerModel().hostname() != null) { return Collections.singletonList(this.innerModel().hostname()); } if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return Collections.emptyList(); } return Collections.unmodifiableList(this.innerModel().hostNames()); } @Override public String publicIpAddressId() { final ApplicationGatewayFrontend frontend = this.frontend(); if (frontend == null) { return null; } else { return frontend.publicIpAddressId(); } } @Override public PublicIpAddress getPublicIpAddress() { return this.getPublicIpAddressAsync().block(); } @Override public Mono<PublicIpAddress> getPublicIpAddressAsync() { String pipId = this.publicIpAddressId(); return pipId == null ? 
Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId); } @Override public String name() { return this.innerModel().name(); } @Override public ApplicationGatewaySslCertificate sslCertificate() { SubResource certRef = this.innerModel().sslCertificate(); if (certRef == null) { return null; } String name = ResourceUtils.nameFromResourceId(certRef.id()); return this.parent().sslCertificates().get(name); } @Override public ApplicationGatewayProtocol protocol() { return this.innerModel().protocol(); } @Override public int frontendPortNumber() { String name = this.frontendPortName(); if (name == null) { return 0; } else if (!this.parent().frontendPorts().containsKey(name)) { return 0; } else { return this.parent().frontendPorts().get(name); } } @Override public String frontendPortName() { if (this.innerModel().frontendPort() != null) { return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id()); } else { return null; } } @Override public ApplicationGatewayFrontend frontend() { final SubResource frontendInner = this.innerModel().frontendIpConfiguration(); if (frontendInner == null) { return null; } else { final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id()); return this.parent().frontends().get(frontendName); } } @Override public ApplicationGatewayImpl attach() { this.parent().withHttpListener(this); return this.parent(); } private ApplicationGatewayListenerImpl withFrontend(String name) { SubResource frontendRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name); this.innerModel().withFrontendIpConfiguration(frontendRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(String name) { SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name); this.innerModel().withFrontendPort(portRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) { String portName = this.parent().frontendPortNameFromNumber(portNumber); if (portName == null) { portName = this.parent().manager().resourceManager().internalContext() .randomResourceName("port", 9); this.parent().withFrontendPort(portNumber, portName); } return this.withFrontendPort(portName); } @Override public ApplicationGatewayListenerImpl withSslCertificate(String name) { SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name); this.innerModel().withSslCertificate(certRef); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) { return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null); } private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId( String keyVaultSecretId, String name) { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach(); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException { return withSslCertificateFromPfxFile(pfxFile, null); } private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } 
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach(); return this.withSslCertificate(name); } @Override public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) { ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate(); if (sslCert != null) { sslCert.withPfxPassword(password); } return this; } @Override public ApplicationGatewayListenerImpl withHttp() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP); return this; } @Override public ApplicationGatewayListenerImpl withHttps() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS); return this; } @Override @Override public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) { if (!CoreUtils.isNullOrEmpty(hostnames)) { this.innerModel().withHostname(null); this.innerModel().withHostNames(hostnames); } return this; } @Override public ApplicationGatewayListenerImpl withServerNameIndication() { this.innerModel().withRequireServerNameIndication(true); return this; } @Override public ApplicationGatewayListenerImpl withoutServerNameIndication() { this.innerModel().withRequireServerNameIndication(false); return this; } @Override public ApplicationGatewayListenerImpl withPrivateFrontend() { this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name()); return this; } @Override public ApplicationGatewayListenerImpl withPublicFrontend() { this.withFrontend(this.parent().ensureDefaultPublicFrontend().name()); return this; } }
Move this line into `if (hostname != null) {`?
public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) { this.innerModel().withHostname(null); if (!CoreUtils.isNullOrEmpty(hostnames)) { this.innerModel().withHostNames(hostnames); } return this; }
this.innerModel().withHostname(null);
public ApplicationGatewayListenerImpl withHostnames(List<String> hostnames) { if (!CoreUtils.isNullOrEmpty(hostnames)) { this.innerModel().withHostname(null); this.innerModel().withHostNames(hostnames); } return this; }
class ApplicationGatewayListenerImpl extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway> implements ApplicationGatewayListener, ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>, ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>, ApplicationGatewayListener.Update { ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) { super(inner, parent); } @Override public String networkId() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.networkId(); } else { return null; } } @Override public String subnetName() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.subnetName(); } else { return null; } } @Override public boolean requiresServerNameIndication() { if (this.innerModel().requireServerNameIndication() != null) { return this.innerModel().requireServerNameIndication(); } else { return false; } } @Override public String hostname() { if (this.innerModel().hostname() != null) { return this.innerModel().hostname(); } if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return this.innerModel().hostNames().get(0); } return null; } @Override public List<String> hostnames() { if (this.innerModel().hostname() != null) { return Collections.singletonList(this.innerModel().hostname()); } if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return Collections.emptyList(); } return Collections.unmodifiableList(this.innerModel().hostNames()); } @Override public String publicIpAddressId() { final ApplicationGatewayFrontend frontend = this.frontend(); if (frontend == null) { return null; } else { return frontend.publicIpAddressId(); } } @Override public PublicIpAddress getPublicIpAddress() { return this.getPublicIpAddressAsync().block(); } @Override public Mono<PublicIpAddress> getPublicIpAddressAsync() { String pipId = this.publicIpAddressId(); return pipId == null ? 
Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId); } @Override public String name() { return this.innerModel().name(); } @Override public ApplicationGatewaySslCertificate sslCertificate() { SubResource certRef = this.innerModel().sslCertificate(); if (certRef == null) { return null; } String name = ResourceUtils.nameFromResourceId(certRef.id()); return this.parent().sslCertificates().get(name); } @Override public ApplicationGatewayProtocol protocol() { return this.innerModel().protocol(); } @Override public int frontendPortNumber() { String name = this.frontendPortName(); if (name == null) { return 0; } else if (!this.parent().frontendPorts().containsKey(name)) { return 0; } else { return this.parent().frontendPorts().get(name); } } @Override public String frontendPortName() { if (this.innerModel().frontendPort() != null) { return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id()); } else { return null; } } @Override public ApplicationGatewayFrontend frontend() { final SubResource frontendInner = this.innerModel().frontendIpConfiguration(); if (frontendInner == null) { return null; } else { final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id()); return this.parent().frontends().get(frontendName); } } @Override public ApplicationGatewayImpl attach() { this.parent().withHttpListener(this); return this.parent(); } private ApplicationGatewayListenerImpl withFrontend(String name) { SubResource frontendRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name); this.innerModel().withFrontendIpConfiguration(frontendRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(String name) { SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name); this.innerModel().withFrontendPort(portRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) { String portName = this.parent().frontendPortNameFromNumber(portNumber); if (portName == null) { portName = this.parent().manager().resourceManager().internalContext() .randomResourceName("port", 9); this.parent().withFrontendPort(portNumber, portName); } return this.withFrontendPort(portName); } @Override public ApplicationGatewayListenerImpl withSslCertificate(String name) { SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name); this.innerModel().withSslCertificate(certRef); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) { return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null); } private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId( String keyVaultSecretId, String name) { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach(); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException { return withSslCertificateFromPfxFile(pfxFile, null); } private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } 
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach(); return this.withSslCertificate(name); } @Override public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) { ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate(); if (sslCert != null) { sslCert.withPfxPassword(password); } return this; } @Override public ApplicationGatewayListenerImpl withHttp() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP); return this; } @Override public ApplicationGatewayListenerImpl withHttps() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS); return this; } @Override public ApplicationGatewayListenerImpl withHostname(String hostname) { this.innerModel().withHostname(null); if (hostname != null) { List<String> hostNames = new ArrayList<>(); hostNames.add(hostname); this.innerModel().withHostNames(hostNames); } return this; } @Override @Override public ApplicationGatewayListenerImpl withServerNameIndication() { this.innerModel().withRequireServerNameIndication(true); return this; } @Override public ApplicationGatewayListenerImpl withoutServerNameIndication() { this.innerModel().withRequireServerNameIndication(false); return this; } @Override public ApplicationGatewayListenerImpl withPrivateFrontend() { this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name()); return this; } @Override public ApplicationGatewayListenerImpl withPublicFrontend() { this.withFrontend(this.parent().ensureDefaultPublicFrontend().name()); return this; } }
class ApplicationGatewayListenerImpl extends ChildResourceImpl<ApplicationGatewayHttpListener, ApplicationGatewayImpl, ApplicationGateway> implements ApplicationGatewayListener, ApplicationGatewayListener.Definition<ApplicationGateway.DefinitionStages.WithCreate>, ApplicationGatewayListener.UpdateDefinition<ApplicationGateway.Update>, ApplicationGatewayListener.Update { ApplicationGatewayListenerImpl(ApplicationGatewayHttpListener inner, ApplicationGatewayImpl parent) { super(inner, parent); } @Override public String networkId() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.networkId(); } else { return null; } } @Override public String subnetName() { ApplicationGatewayFrontend frontend = this.frontend(); if (frontend != null) { return frontend.subnetName(); } else { return null; } } @Override public boolean requiresServerNameIndication() { if (this.innerModel().requireServerNameIndication() != null) { return this.innerModel().requireServerNameIndication(); } else { return false; } } @Override public String hostname() { if (this.innerModel().hostname() != null) { return this.innerModel().hostname(); } if (!CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return this.innerModel().hostNames().get(0); } return null; } @Override public List<String> hostnames() { if (this.innerModel().hostname() != null) { return Collections.singletonList(this.innerModel().hostname()); } if (CoreUtils.isNullOrEmpty(this.innerModel().hostNames())) { return Collections.emptyList(); } return Collections.unmodifiableList(this.innerModel().hostNames()); } @Override public String publicIpAddressId() { final ApplicationGatewayFrontend frontend = this.frontend(); if (frontend == null) { return null; } else { return frontend.publicIpAddressId(); } } @Override public PublicIpAddress getPublicIpAddress() { return this.getPublicIpAddressAsync().block(); } @Override public Mono<PublicIpAddress> getPublicIpAddressAsync() { String pipId = this.publicIpAddressId(); return pipId == null ? 
Mono.empty() : this.parent().manager().publicIpAddresses().getByIdAsync(pipId); } @Override public String name() { return this.innerModel().name(); } @Override public ApplicationGatewaySslCertificate sslCertificate() { SubResource certRef = this.innerModel().sslCertificate(); if (certRef == null) { return null; } String name = ResourceUtils.nameFromResourceId(certRef.id()); return this.parent().sslCertificates().get(name); } @Override public ApplicationGatewayProtocol protocol() { return this.innerModel().protocol(); } @Override public int frontendPortNumber() { String name = this.frontendPortName(); if (name == null) { return 0; } else if (!this.parent().frontendPorts().containsKey(name)) { return 0; } else { return this.parent().frontendPorts().get(name); } } @Override public String frontendPortName() { if (this.innerModel().frontendPort() != null) { return ResourceUtils.nameFromResourceId(this.innerModel().frontendPort().id()); } else { return null; } } @Override public ApplicationGatewayFrontend frontend() { final SubResource frontendInner = this.innerModel().frontendIpConfiguration(); if (frontendInner == null) { return null; } else { final String frontendName = ResourceUtils.nameFromResourceId(frontendInner.id()); return this.parent().frontends().get(frontendName); } } @Override public ApplicationGatewayImpl attach() { this.parent().withHttpListener(this); return this.parent(); } private ApplicationGatewayListenerImpl withFrontend(String name) { SubResource frontendRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendIPConfigurations/" + name); this.innerModel().withFrontendIpConfiguration(frontendRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(String name) { SubResource portRef = new SubResource().withId(this.parent().futureResourceId() + "/frontendPorts/" + name); this.innerModel().withFrontendPort(portRef); return this; } @Override public ApplicationGatewayListenerImpl withFrontendPort(int portNumber) { String portName = this.parent().frontendPortNameFromNumber(portNumber); if (portName == null) { portName = this.parent().manager().resourceManager().internalContext() .randomResourceName("port", 9); this.parent().withFrontendPort(portNumber, portName); } return this.withFrontendPort(portName); } @Override public ApplicationGatewayListenerImpl withSslCertificate(String name) { SubResource certRef = new SubResource().withId(this.parent().futureResourceId() + "/sslCertificates/" + name); this.innerModel().withSslCertificate(certRef); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId(String keyVaultSecretId) { return withSslCertificateFromKeyVaultSecretId(keyVaultSecretId, null); } private ApplicationGatewayListenerImpl withSslCertificateFromKeyVaultSecretId( String keyVaultSecretId, String name) { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } this.parent().defineSslCertificate(name).withKeyVaultSecretId(keyVaultSecretId).attach(); return this; } @Override public ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile) throws IOException { return withSslCertificateFromPfxFile(pfxFile, null); } private ApplicationGatewayListenerImpl withSslCertificateFromPfxFile(File pfxFile, String name) throws IOException { if (name == null) { name = this.parent().manager().resourceManager().internalContext() .randomResourceName("cert", 10); } 
this.parent().defineSslCertificate(name).withPfxFromFile(pfxFile).attach(); return this.withSslCertificate(name); } @Override public ApplicationGatewayListenerImpl withSslCertificatePassword(String password) { ApplicationGatewaySslCertificateImpl sslCert = (ApplicationGatewaySslCertificateImpl) this.sslCertificate(); if (sslCert != null) { sslCert.withPfxPassword(password); } return this; } @Override public ApplicationGatewayListenerImpl withHttp() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTP); return this; } @Override public ApplicationGatewayListenerImpl withHttps() { this.innerModel().withProtocol(ApplicationGatewayProtocol.HTTPS); return this; } @Override public ApplicationGatewayListenerImpl withHostname(String hostname) { if (hostname != null) { this.innerModel().withHostname(null); List<String> hostNames = new ArrayList<>(); hostNames.add(hostname); this.innerModel().withHostNames(hostNames); } return this; } @Override @Override public ApplicationGatewayListenerImpl withServerNameIndication() { this.innerModel().withRequireServerNameIndication(true); return this; } @Override public ApplicationGatewayListenerImpl withoutServerNameIndication() { this.innerModel().withRequireServerNameIndication(false); return this; } @Override public ApplicationGatewayListenerImpl withPrivateFrontend() { this.withFrontend(this.parent().ensureDefaultPrivateFrontend().name()); return this; } @Override public ApplicationGatewayListenerImpl withPublicFrontend() { this.withFrontend(this.parent().ensureDefaultPublicFrontend().name()); return this; } }
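The suggested change only matters when the caller passes a null or empty list: with the reset outside the guard, such a call silently wipes an existing single-value hostname. Below is a minimal standalone sketch of that difference; the `ListenerModel` stand-in is hypothetical (the real SDK inner model differs), and plain `isEmpty` stands in for `CoreUtils.isNullOrEmpty`.

```java
import java.util.List;

// Hypothetical stand-in for the listener's inner model; not the actual SDK type.
class ListenerModel {
    private String hostname;
    private List<String> hostNames;

    void withHostname(String hostname) { this.hostname = hostname; }
    void withHostNames(List<String> hostNames) { this.hostNames = hostNames; }
    String hostname() { return hostname; }
    List<String> hostNames() { return hostNames; }
}

public class WithHostnamesDemo {
    // After the suggested fix: the single-value hostname is cleared only
    // when a non-empty replacement list is actually supplied.
    static void withHostnames(ListenerModel model, List<String> hostnames) {
        if (hostnames != null && !hostnames.isEmpty()) {
            model.withHostname(null);          // reset moved inside the guard
            model.withHostNames(hostnames);
        }
        // Passing null/empty is now a no-op instead of wiping 'hostname'.
    }

    public static void main(String[] args) {
        ListenerModel model = new ListenerModel();
        model.withHostname("contoso.com");
        withHostnames(model, null);             // no-op
        System.out.println(model.hostname());   // still "contoso.com"
        withHostnames(model, List.of("a.com", "b.com"));
        System.out.println(model.hostname());   // null
        System.out.println(model.hostNames());  // [a.com, b.com]
    }
}
```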
I am not sure this will inject the codesnippet correctly. I think both `BEGIN` and the tag name have to be on the same line, and the same goes for `END` below. Could you please verify that the Javadocs are generated correctly from this codesnippet?
public static void main(String[] args) { DeviceManagementClient deviceManagementClient = new DeviceManagementClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("contoso.api.adu.microsoft.com") .instanceId("blue") .buildClient(); RequestOptions requestOptions = new RequestOptions(); requestOptions.addQueryParam("action", "cancel"); Response<BinaryData> response = deviceManagementClient.stopDeploymentWithResponse("TestGroup", "deploymentId", requestOptions); }
public static void main(String[] args) { DeviceManagementClient deviceManagementClient = new DeviceManagementClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("contoso.api.adu.microsoft.com") .instanceId("blue") .buildClient(); RequestOptions requestOptions = new RequestOptions(); requestOptions.addQueryParam("action", "cancel"); Response<BinaryData> response = deviceManagementClient.stopDeploymentWithResponse("TestGroup", "deploymentId", requestOptions); }
class DeviceManagementCancelOrRetryDeployment { }
class DeviceManagementCancelOrRetryDeployment { }
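For reference, the codesnippet tooling used across azure-sdk-for-java matches its markers line by line, which is why the comment asks for `BEGIN`/`END` and the tag name to share a line. A sketch of the expected layout, assuming that tooling; the tag name here is hypothetical:

```java
public final class DeviceManagementSamples {
    public void stopDeploymentSample() {
        // BEGIN: com.azure.iot.deviceupdate.devicemanagement.stopdeployment
        // ... the sample body that gets copied into the Javadoc lives here ...
        // END: com.azure.iot.deviceupdate.devicemanagement.stopdeployment

        // By contrast, a marker split across lines, e.g.
        //   // BEGIN:
        //   // com.azure.iot.deviceupdate.devicemanagement.stopdeployment
        // would likely not be matched, and the Javadoc injection would come up empty.
    }
}
```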
If prefetch is enabled and the message was not emitted, we should not increment this, right?
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.warning("lockToken[{}] Couldn't release the message.", message.getLockToken(), error), () -> logger.verbose("lockToken[{}] Message successfully released.", message.getLockToken())); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } }
numberConsumed++;
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.warning("lockToken[{}] Couldn't release the message.", message.getLockToken(), error), () -> logger.verbose("lockToken[{}] Message successfully released.", message.getLockToken())); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); if (workQueue.peek() == work) { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if " + "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); getOrUpdateCurrentWork(); } else { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null) { if (currentWork.isTerminal()) { REQUESTED.updateAndGet(this, currentRequest -> { final int remainingEvents = currentWork.getRemainingEvents(); if (remainingEvents < 1) { return currentRequest; } final long difference = currentRequest - remainingEvents; logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]" + " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents, difference); return difference < 0 ? 0 : difference; }); currentWork = workQueue.poll(); continue; } final SynchronousReceiveWork work = currentWork; logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(), work.getNumberOfEvents()); work.start(); requestUpstream(work.getNumberOfEvents()); return work; } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.verbose("Requesting messages from upstream. 
currentRequested[{}] numberOfMessages[{}] difference[{}]", currentRequested, numberOfMessages, difference); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); if (workQueue.peek() == work) { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if " + "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); getOrUpdateCurrentWork(); } else { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null) { if (currentWork.isTerminal()) { REQUESTED.updateAndGet(this, currentRequest -> { final int remainingEvents = currentWork.getRemainingEvents(); if (remainingEvents < 1) { return currentRequest; } final long difference = currentRequest - remainingEvents; logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]" + " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents, difference); return difference < 0 ? 0 : difference; }); currentWork = workQueue.poll(); continue; } final SynchronousReceiveWork work = currentWork; logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(), work.getNumberOfEvents()); work.start(); requestUpstream(work.getNumberOfEvents()); return work; } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.verbose("Requesting messages from upstream. 
currentRequested[{}] numberOfMessages[{}] difference[{}]", currentRequested, numberOfMessages, difference); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
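The code already encodes the answer: in the prefetch-enabled branch the unemitted message is pushed back with `addFirst` and the loop breaks before `numberConsumed++`, so it is not counted; only the prefetch-disabled path, where the message is deliberately released, falls through to the increment. A trimmed, runnable sketch of just that control flow, with the downstream emit and the release stubbed out:

```java
import java.util.ArrayDeque;
import java.util.Deque;

public class DrainLoopSketch {
    // Stand-ins for the real collaborators: the downstream work item never
    // accepts the message here, and release just logs.
    static boolean emit(String message) { return false; }
    static void release(String message) { System.out.println("released " + message); }

    static long drain(boolean isPrefetchDisabled) {
        Deque<String> buffer = new ArrayDeque<>();
        buffer.add("m1");
        long numberConsumed = 0L;
        while (!buffer.isEmpty()) {
            String message = buffer.poll();
            if (!emit(message)) {
                if (isPrefetchDisabled) {
                    release(message);          // taken off the wire on purpose, so it counts
                } else {
                    buffer.addFirst(message);  // keep it for the next downstream
                    break;                     // skips the increment below
                }
            }
            numberConsumed++;
        }
        return numberConsumed;
    }

    public static void main(String[] args) {
        System.out.println("prefetch enabled:  consumed=" + drain(false)); // prints 0
        System.out.println("prefetch disabled: consumed=" + drain(true));  // prints 1
    }
}
```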
Do we recommend using `logger.atWarning().addKeyValue("lockToken", <>)`?
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.warning("lockToken[{}] Couldn't release the message.", message.getLockToken(), error), () -> logger.verbose("lockToken[{}] Message successfully released.", message.getLockToken())); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } }
error -> logger.warning("lockToken[{}] Couldn't release the message.",
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.warning("lockToken[{}] Couldn't release the message.", message.getLockToken(), error), () -> logger.verbose("lockToken[{}] Message successfully released.", message.getLockToken())); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); if (workQueue.peek() == work) { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if " + "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); getOrUpdateCurrentWork(); } else { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null) { if (currentWork.isTerminal()) { REQUESTED.updateAndGet(this, currentRequest -> { final int remainingEvents = currentWork.getRemainingEvents(); if (remainingEvents < 1) { return currentRequest; } final long difference = currentRequest - remainingEvents; logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]" + " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents, difference); return difference < 0 ? 0 : difference; }); currentWork = workQueue.poll(); continue; } final SynchronousReceiveWork work = currentWork; logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(), work.getNumberOfEvents()); work.start(); requestUpstream(work.getNumberOfEvents()); return work; } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.verbose("Requesting messages from upstream. 
currentRequested[{}] numberOfMessages[{}] difference[{}]", currentRequested, numberOfMessages, difference); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); if (workQueue.peek() == work) { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if " + "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); getOrUpdateCurrentWork(); } else { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null) { if (currentWork.isTerminal()) { REQUESTED.updateAndGet(this, currentRequest -> { final int remainingEvents = currentWork.getRemainingEvents(); if (remainingEvents < 1) { return currentRequest; } final long difference = currentRequest - remainingEvents; logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]" + " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents, difference); return difference < 0 ? 0 : difference; }); currentWork = workQueue.poll(); continue; } final SynchronousReceiveWork work = currentWork; logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(), work.getNumberOfEvents()); work.start(); requestUpstream(work.getNumberOfEvents()); return work; } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.verbose("Requesting messages from upstream. 
currentRequested[{}] numberOfMessages[{}] difference[{}]", currentRequested, numberOfMessages, difference); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
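A sketch of what the suggested structured form could look like, assuming azure-core's `LoggingEventBuilder` API (`atWarning()`/`atVerbose()`, `addKeyValue`, and the ClientLogger convention of passing the exception as the last `log` argument); this moves `lockToken` out of the format string and into a discrete key-value pair:

```java
import com.azure.core.util.logging.ClientLogger;

public class StructuredLoggingSketch {
    private static final ClientLogger LOGGER = new ClientLogger(StructuredLoggingSketch.class);

    void onReleaseFailed(String lockToken, Throwable error) {
        // Instead of encoding the key into the format string:
        //   logger.warning("lockToken[{}] Couldn't release the message.", lockToken, error);
        // the structured form emits lockToken as context on the log event:
        LOGGER.atWarning()
            .addKeyValue("lockToken", lockToken)
            .log("Couldn't release the message.", error);
    }

    void onReleaseSucceeded(String lockToken) {
        LOGGER.atVerbose()
            .addKeyValue("lockToken", lockToken)
            .log("Message successfully released.");
    }
}
```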
I don't understand these two tests. Why is IllegalStateException thrown?
public void bothRetryOptionsAndRetryPolicySetSync() { assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder) .retryOptions(new RetryOptions(new ExponentialBackoffOptions())) .retryPolicy(new RetryPolicy()) .buildClient()); }
assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder)
public void bothRetryOptionsAndRetryPolicySetSync() { assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder) .retryOptions(new RetryOptions(new ExponentialBackoffOptions())) .retryPolicy(new RetryPolicy()) .buildClient()); }
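Context for the two tests: azure-core client builders treat `retryOptions` and `retryPolicy` as mutually exclusive ways to configure the same retry step of the pipeline, and they validate this at build time. An illustrative version of that validation follows; the real helper lives in azure-core, so the method name and message here are approximations:

```java
import com.azure.core.http.policy.RetryOptions;
import com.azure.core.http.policy.RetryPolicy;

final class RetryValidationSketch {
    private RetryValidationSketch() { }

    static RetryPolicy validateAndGetRetryPolicy(RetryPolicy retryPolicy, RetryOptions retryOptions) {
        if (retryPolicy != null && retryOptions != null) {
            // Both settings drive the same retry step, so the builder fails
            // fast instead of silently preferring one of them. This is the
            // IllegalStateException both tests assert on.
            throw new IllegalStateException("'retryPolicy' and 'retryOptions' cannot both be set.");
        }
        if (retryOptions != null) {
            // The real builder converts the options into an equivalent policy here.
            return new RetryPolicy();
        }
        return retryPolicy != null ? retryPolicy : new RetryPolicy();
    }
}
```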
class PhoneNumbersClientBuilderTest { private static final String ENDPOINT = "https: private static final String ACCESSKEY = "QWNjZXNzS2V5"; private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-communication-phonenumbers.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private HttpClient httpClient; private PhoneNumbersClientBuilder clientBuilder; @BeforeEach void setUp() { this.httpClient = mock(HttpClient.class); this.clientBuilder = Mockito.spy(new PhoneNumbersClientBuilder()); } @AfterEach void tearDown() { Mockito.framework().clearInlineMock(this); } @Test() public void buildClientWithHttpClientWithCredential() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildClient(); assertNotNull(phoneNumberClient); validateRequiredSettings(spyHelper); } @Test() public void buildClientWithCustomPipeline() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); HttpPipeline httpPipeline = mock(HttpPipeline.class); PhoneNumbersClient phoneNumberClient = this.setupBuilderCustomPipeline(httpPipeline).buildClient(); assertNotNull(phoneNumberClient); validateCustomPipeline(spyHelper, httpPipeline); } @Test() public void buildClientWithLogOptions() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); HttpLogOptions logOptions = mock(HttpLogOptions.class); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .httpLogOptions(logOptions) .buildClient(); assertNotNull(phoneNumberClient); validateLogOptions(spyHelper, logOptions); } @Test() public void buildClientWithConfiguration() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); Configuration configuration = mock(Configuration.class); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .configuration(configuration) .buildClient(); assertNotNull(phoneNumberClient); validateConfiguration(spyHelper, configuration); } @Test() public void buildClientWithServiceVersion() { PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .serviceVersion(PhoneNumbersServiceVersion.V2021_03_07) .buildClient(); assertNotNull(phoneNumberClient); } @Test() public void buildClientWithOneAdditionalPolicy() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); additionalPolicies.add(mock(HttpPipelinePolicy.class)); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies) .buildClient(); assertNotNull(phoneNumberClient); validateAdditionalPolicies(spyHelper, additionalPolicies); } @Test() public void buildClientWithMultipleAdditionalPolicies() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); additionalPolicies.add(mock(HttpPipelinePolicy.class)); additionalPolicies.add(mock(HttpPipelinePolicy.class)); additionalPolicies.add(mock(HttpPipelinePolicy.class)); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies) .buildClient(); assertNotNull(phoneNumberClient); validateAdditionalPolicies(spyHelper, 
additionalPolicies); } @Test() public void buildClientNoEndpoint() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.buildClient(); }); } @Test() public void buildClientNoPipelineNoCredentials() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildClient(); }); } @Test() public void buildAsyncClientWithHttpClientWithCredential() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateRequiredSettings(spyHelper); } @Test() public void buildAsyncClientWithCustomPipeline() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); HttpPipeline httpPipeline = mock(HttpPipeline.class); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderCustomPipeline(httpPipeline).buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateCustomPipeline(spyHelper, httpPipeline); } @Test() public void buildAsyncClientWithLogOptions() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); HttpLogOptions logOptions = mock(HttpLogOptions.class); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .httpLogOptions(logOptions) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateLogOptions(spyHelper, logOptions); } @Test() public void buildAsyncClientWithConfiguration() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); Configuration configuration = mock(Configuration.class); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .configuration(configuration) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateConfiguration(spyHelper, configuration); } @Test() public void buildAsyncClientWithServiceVersion() { PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .serviceVersion(PhoneNumbersServiceVersion.V2021_03_07) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); } @Test() public void buildAsyncClientWithOneAdditionalPolicy() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); additionalPolicies.add(mock(HttpPipelinePolicy.class)); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateAdditionalPolicies(spyHelper, additionalPolicies); } @Test() public void buildAsyncClientWithMultipleAdditionalPolicies() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); additionalPolicies.add(mock(HttpPipelinePolicy.class)); additionalPolicies.add(mock(HttpPipelinePolicy.class)); additionalPolicies.add(mock(HttpPipelinePolicy.class)); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateAdditionalPolicies(spyHelper, additionalPolicies); } @Test() public void buildAsyncClientNoEndpointThrows() { assertThrows(NullPointerException.class, () -> { 
this.clientBuilder.buildAsyncClient(); }); } @Test() public void buildAsyncClientNoPipelineNoCredentialsThrows() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildAsyncClient(); }); } @Test() public void setEndpointNullThrows() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.endpoint(null); }); } @Test() public void addPolicyNullThrows() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.addPolicy(null); }); } @Test public void bothRetryOptionsAndRetryPolicySetSync() { assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder) .retryOptions(new RetryOptions(new ExponentialBackoffOptions())) .retryPolicy(new RetryPolicy()) .buildClient()); } @Test public void bothRetryOptionsAndRetryPolicySetAsync() { assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder) .retryOptions(new RetryOptions(new ExponentialBackoffOptions())) .retryPolicy(new RetryPolicy()) .buildAsyncClient()); } private PhoneNumbersClientBuilder setupBuilderWithHttpClientWithCredential(PhoneNumbersClientBuilder clientBuilder) { return clientBuilder .endpoint(ENDPOINT) .httpClient(this.httpClient) .credential(new AzureKeyCredential(ACCESSKEY)); } private PhoneNumbersClientBuilder setupBuilderWithPolicies( PhoneNumbersClientBuilder clientBuilder, List<HttpPipelinePolicy> policies) { clientBuilder = this.setupBuilderWithHttpClientWithCredential(clientBuilder); for (HttpPipelinePolicy policy : policies) { clientBuilder.addPolicy(policy); } return clientBuilder; } private PhoneNumbersClientBuilder setupBuilderCustomPipeline(HttpPipeline pipeline) { return clientBuilder .endpoint(ENDPOINT) .pipeline(pipeline); } private void validateRequiredSettings(ClientBuilderSpyHelper spyHelper) { spyHelper.capturePhoneNumberAdminClientImpl(); spyHelper.captureHttpPipelineSettings(); PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue(); assertEquals(ENDPOINT, phoneNumberManagementClient.getEndpoint()); assertEquals(this.httpClient, phoneNumberManagementClient.getHttpPipeline().getHttpClient()); assertEquals(6, phoneNumberManagementClient.getHttpPipeline().getPolicyCount()); assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0)); assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1)); assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3)); assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4)); assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(5)); assertEquals(spyHelper.defaultHttpLogOptionsRef.get(), spyHelper.httpLogOptionsArg.getValue()); assertEquals(spyHelper.defaultHttpLogOptionsRef.get().getApplicationId(), spyHelper.uaPolicyAppIdArg.getValue()); assertEquals(PROPERTIES.get(SDK_NAME), spyHelper.uaPolicySdkNameArg.getValue()); assertEquals(PROPERTIES.get(SDK_VERSION), spyHelper.uaPolicySdkVersionArg.getValue()); assertNull(spyHelper.uaPolicyConfigArg.getValue()); } private void validateCustomPipeline(ClientBuilderSpyHelper spyHelper, HttpPipeline expectedPipeline) { spyHelper.capturePhoneNumberAdminClientImpl(); PhoneNumberAdminClientImpl phoneNumberAdminClient = spyHelper.phoneNumberAdminClientArg.getValue(); assertEquals(expectedPipeline, phoneNumberAdminClient.getHttpPipeline()); } private void validateLogOptions(ClientBuilderSpyHelper spyHelper, HttpLogOptions expectedLogOptions) { spyHelper.captureHttpPipelineSettings(); HttpLogOptions actualLogOptions =
spyHelper.httpLogOptionsArg.getValue(); assertEquals(expectedLogOptions, actualLogOptions); } private void validateConfiguration(ClientBuilderSpyHelper spyHelper, Configuration expectedConfiguration) { spyHelper.captureHttpPipelineSettings(); Configuration actualConfiguration = spyHelper.uaPolicyConfigArg.getValue(); assertEquals(expectedConfiguration, actualConfiguration); } private void validateAdditionalPolicies(ClientBuilderSpyHelper spyHelper, List<HttpPipelinePolicy> policies) { spyHelper.capturePhoneNumberAdminClientImpl(); PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue(); int expectedPolicyCount = 6 + policies.size(); int lastPolicyIndex = expectedPolicyCount - 1; int customPolicyIndex = 5; assertEquals(expectedPolicyCount, phoneNumberManagementClient.getHttpPipeline().getPolicyCount()); assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0)); assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1)); assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3)); assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4)); assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(lastPolicyIndex)); for (HttpPipelinePolicy policy : policies) { assertEquals(policy, phoneNumberManagementClient.getHttpPipeline().getPolicy(customPolicyIndex)); customPolicyIndex++; } } private class ClientBuilderSpyHelper { final PhoneNumbersClientBuilder clientBuilder; final AtomicReference<HmacAuthenticationPolicy> authenticationPolicyRef = new AtomicReference<>(); final AtomicReference<UserAgentPolicy> userAgentPolicyRef = new AtomicReference<>(); final AtomicReference<RequestIdPolicy> requestIdPolicyRef = new AtomicReference<>(); final AtomicReference<RetryPolicy> retryPolicyRef = new AtomicReference<>(); final AtomicReference<CookiePolicy> cookiePolicyRef = new AtomicReference<>(); final AtomicReference<HttpLoggingPolicy> httpLoggingPolicyRef = new AtomicReference<>(); final AtomicReference<HttpLogOptions> defaultHttpLogOptionsRef = new AtomicReference<>(); final ArgumentCaptor<PhoneNumberAdminClientImpl> phoneNumberAdminClientArg = ArgumentCaptor.forClass(PhoneNumberAdminClientImpl.class); final ArgumentCaptor<String> uaPolicyAppIdArg = ArgumentCaptor.forClass(String.class); final ArgumentCaptor<String> uaPolicySdkNameArg = ArgumentCaptor.forClass(String.class); final ArgumentCaptor<String> uaPolicySdkVersionArg = ArgumentCaptor.forClass(String.class); final ArgumentCaptor<Configuration> uaPolicyConfigArg = ArgumentCaptor.forClass(Configuration.class); final ArgumentCaptor<HttpLogOptions> httpLogOptionsArg = ArgumentCaptor.forClass(HttpLogOptions.class); ClientBuilderSpyHelper(PhoneNumbersClientBuilder clientBuilder) { this.clientBuilder = clientBuilder; this.initializeSpies(); } private void initializeSpies() { Answer<HmacAuthenticationPolicy> createCommunicationClientCredentialPolicy = (invocation) -> { this.authenticationPolicyRef.set((HmacAuthenticationPolicy) invocation.callRealMethod()); return this.authenticationPolicyRef.get(); }; doAnswer(createCommunicationClientCredentialPolicy).when(this.clientBuilder).createAuthenticationPolicy(); Answer<UserAgentPolicy> createUserAgentPolicy = (invocation) -> { this.userAgentPolicyRef.set(mock(UserAgentPolicy.class)); return 
this.userAgentPolicyRef.get(); }; doAnswer(createUserAgentPolicy).when(this.clientBuilder).createUserAgentPolicy(any(), any(), any(), any()); Answer<RequestIdPolicy> createRequestIdPolicy = (invocation) -> { this.requestIdPolicyRef.set(mock(RequestIdPolicy.class)); return this.requestIdPolicyRef.get(); }; doAnswer(createRequestIdPolicy).when(this.clientBuilder).createRequestIdPolicy(); Answer<CookiePolicy> createCookiePolicy = (invocation) -> { this.cookiePolicyRef.set((CookiePolicy) invocation.callRealMethod()); return this.cookiePolicyRef.get(); }; doAnswer(createCookiePolicy).when(this.clientBuilder).createCookiePolicy(); Answer<HttpLoggingPolicy> createHttpLoggingPolicy = (invocation) -> { this.httpLoggingPolicyRef.set((HttpLoggingPolicy) invocation.callRealMethod()); return this.httpLoggingPolicyRef.get(); }; doAnswer(createHttpLoggingPolicy).when(this.clientBuilder).createHttpLoggingPolicy(any()); Answer<HttpLogOptions> createDefaultHttpLogOptions = (invocation) -> { this.defaultHttpLogOptionsRef.set((HttpLogOptions) invocation.callRealMethod()); return this.defaultHttpLogOptionsRef.get(); }; doAnswer(createDefaultHttpLogOptions).when(this.clientBuilder).createDefaultHttpLogOptions(); } void capturePhoneNumberAdminClientImpl() { verify(this.clientBuilder, times(1)) .createPhoneNumberAsyncClient(this.phoneNumberAdminClientArg.capture()); } void captureHttpPipelineSettings() { verify(this.clientBuilder, times(1)) .createAuthenticationPolicy(); verify(this.clientBuilder, times(1)) .createUserAgentPolicy( this.uaPolicyAppIdArg.capture(), this.uaPolicySdkNameArg.capture(), this.uaPolicySdkVersionArg.capture(), this.uaPolicyConfigArg.capture()); verify(this.clientBuilder, times(1)) .createHttpLoggingPolicy(this.httpLogOptionsArg.capture()); } } }
class PhoneNumbersClientBuilderTest { private static final String ENDPOINT = "https: private static final String ACCESSKEY = "QWNjZXNzS2V5"; private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-communication-phonenumbers.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private HttpClient httpClient; private PhoneNumbersClientBuilder clientBuilder; @BeforeEach void setUp() { this.httpClient = mock(HttpClient.class); this.clientBuilder = Mockito.spy(new PhoneNumbersClientBuilder()); } @AfterEach void tearDown() { Mockito.framework().clearInlineMock(this); } @Test() public void buildClientWithHttpClientWithCredential() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildClient(); assertNotNull(phoneNumberClient); validateRequiredSettings(spyHelper); } @Test() public void buildClientWithCustomPipeline() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); HttpPipeline httpPipeline = mock(HttpPipeline.class); PhoneNumbersClient phoneNumberClient = this.setupBuilderCustomPipeline(httpPipeline).buildClient(); assertNotNull(phoneNumberClient); validateCustomPipeline(spyHelper, httpPipeline); } @Test() public void buildClientWithLogOptions() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); HttpLogOptions logOptions = mock(HttpLogOptions.class); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .httpLogOptions(logOptions) .buildClient(); assertNotNull(phoneNumberClient); validateLogOptions(spyHelper, logOptions); } @Test() public void buildClientWithConfiguration() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); Configuration configuration = mock(Configuration.class); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .configuration(configuration) .buildClient(); assertNotNull(phoneNumberClient); validateConfiguration(spyHelper, configuration); } @Test() public void buildClientWithServiceVersion() { PhoneNumbersClient phoneNumberClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .serviceVersion(PhoneNumbersServiceVersion.V2021_03_07) .buildClient(); assertNotNull(phoneNumberClient); } @Test() public void buildClientWithOneAdditionalPolicy() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); additionalPolicies.add(mock(HttpPipelinePolicy.class)); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies) .buildClient(); assertNotNull(phoneNumberClient); validateAdditionalPolicies(spyHelper, additionalPolicies); } @Test() public void buildClientWithMultipleAdditionalPolicies() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); additionalPolicies.add(mock(HttpPipelinePolicy.class)); additionalPolicies.add(mock(HttpPipelinePolicy.class)); additionalPolicies.add(mock(HttpPipelinePolicy.class)); PhoneNumbersClient phoneNumberClient = this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies) .buildClient(); assertNotNull(phoneNumberClient); validateAdditionalPolicies(spyHelper, 
additionalPolicies); } @Test() public void buildClientNoEndpoint() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.buildClient(); }); } @Test() public void buildClientNoPipelineNoCredentials() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildClient(); }); } @Test() public void buildAsyncClientWithHttpClientWithCredential() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder).buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateRequiredSettings(spyHelper); } @Test() public void buildAsyncClientWithCustomPipeline() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); HttpPipeline httpPipeline = mock(HttpPipeline.class); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderCustomPipeline(httpPipeline).buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateCustomPipeline(spyHelper, httpPipeline); } @Test() public void buildAsyncClientWithLogOptions() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); HttpLogOptions logOptions = mock(HttpLogOptions.class); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .httpLogOptions(logOptions) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateLogOptions(spyHelper, logOptions); } @Test() public void buildAsyncClientWithConfiguration() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); Configuration configuration = mock(Configuration.class); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .configuration(configuration) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateConfiguration(spyHelper, configuration); } @Test() public void buildAsyncClientWithServiceVersion() { PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithHttpClientWithCredential(this.clientBuilder) .serviceVersion(PhoneNumbersServiceVersion.V2021_03_07) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); } @Test() public void buildAsyncClientWithOneAdditionalPolicy() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); additionalPolicies.add(mock(HttpPipelinePolicy.class)); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateAdditionalPolicies(spyHelper, additionalPolicies); } @Test() public void buildAsyncClientWithMultipleAdditionalPolicies() { ClientBuilderSpyHelper spyHelper = new ClientBuilderSpyHelper(this.clientBuilder); List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); additionalPolicies.add(mock(HttpPipelinePolicy.class)); additionalPolicies.add(mock(HttpPipelinePolicy.class)); additionalPolicies.add(mock(HttpPipelinePolicy.class)); PhoneNumbersAsyncClient phoneNumberAsyncClient = this.setupBuilderWithPolicies(this.clientBuilder, additionalPolicies) .buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); validateAdditionalPolicies(spyHelper, additionalPolicies); } @Test() public void buildAsyncClientNoEndpointThrows() { assertThrows(NullPointerException.class, () -> { 
this.clientBuilder.buildAsyncClient(); }); } @Test() public void buildAsyncClientNoPipelineNoCredentialsThrows() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.endpoint(ENDPOINT).httpClient(this.httpClient).buildAsyncClient(); }); } @Test() public void setEndpointNullThrows() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.endpoint(null); }); } @Test() public void addPolicyNullThrows() { assertThrows(NullPointerException.class, () -> { this.clientBuilder.addPolicy(null); }); } @Test public void bothRetryOptionsAndRetryPolicySetSync() { assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder) .retryOptions(new RetryOptions(new ExponentialBackoffOptions())) .retryPolicy(new RetryPolicy()) .buildClient()); } @Test public void bothRetryOptionsAndRetryPolicySetAsync() { assertThrows(IllegalStateException.class, () -> setupBuilderWithHttpClientWithCredential(this.clientBuilder) .retryOptions(new RetryOptions(new ExponentialBackoffOptions())) .retryPolicy(new RetryPolicy()) .buildAsyncClient()); } private PhoneNumbersClientBuilder setupBuilderWithHttpClientWithCredential(PhoneNumbersClientBuilder clientBuilder) { return clientBuilder .endpoint(ENDPOINT) .httpClient(this.httpClient) .credential(new AzureKeyCredential(ACCESSKEY)); } private PhoneNumbersClientBuilder setupBuilderWithPolicies( PhoneNumbersClientBuilder clientBuilder, List<HttpPipelinePolicy> policies) { clientBuilder = this.setupBuilderWithHttpClientWithCredential(clientBuilder); for (HttpPipelinePolicy policy : policies) { clientBuilder.addPolicy(policy); } return clientBuilder; } private PhoneNumbersClientBuilder setupBuilderCustomPipeline(HttpPipeline pipeline) { return clientBuilder .endpoint(ENDPOINT) .pipeline(pipeline); } private void validateRequiredSettings(ClientBuilderSpyHelper spyHelper) { spyHelper.capturePhoneNumberAdminClientImpl(); spyHelper.captureHttpPipelineSettings(); PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue(); assertEquals(ENDPOINT, phoneNumberManagementClient.getEndpoint()); assertEquals(this.httpClient, phoneNumberManagementClient.getHttpPipeline().getHttpClient()); assertEquals(6, phoneNumberManagementClient.getHttpPipeline().getPolicyCount()); assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0)); assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1)); assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3)); assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4)); assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(5)); assertEquals(spyHelper.defaultHttpLogOptionsRef.get(), spyHelper.httpLogOptionsArg.getValue()); assertEquals(spyHelper.defaultHttpLogOptionsRef.get().getApplicationId(), spyHelper.uaPolicyAppIdArg.getValue()); assertEquals(PROPERTIES.get(SDK_NAME), spyHelper.uaPolicySdkNameArg.getValue()); assertEquals(PROPERTIES.get(SDK_VERSION), spyHelper.uaPolicySdkVersionArg.getValue()); assertNull(spyHelper.uaPolicyConfigArg.getValue()); } private void validateCustomPipeline(ClientBuilderSpyHelper spyHelper, HttpPipeline expectedPipeline) { spyHelper.capturePhoneNumberAdminClientImpl(); PhoneNumberAdminClientImpl phoneNumberAdminClient = spyHelper.phoneNumberAdminClientArg.getValue(); assertEquals(expectedPipeline, phoneNumberAdminClient.getHttpPipeline()); } private void validateLogOptions(ClientBuilderSpyHelper spyHelper, HttpLogOptions expectedLogOptions) { spyHelper.captureHttpPipelineSettings(); HttpLogOptions actualLogOptions =
spyHelper.httpLogOptionsArg.getValue(); assertEquals(expectedLogOptions, actualLogOptions); } private void validateConfiguration(ClientBuilderSpyHelper spyHelper, Configuration expectedConfiguration) { spyHelper.captureHttpPipelineSettings(); Configuration actualConfiguration = spyHelper.uaPolicyConfigArg.getValue(); assertEquals(expectedConfiguration, actualConfiguration); } private void validateAdditionalPolicies(ClientBuilderSpyHelper spyHelper, List<HttpPipelinePolicy> policies) { spyHelper.capturePhoneNumberAdminClientImpl(); PhoneNumberAdminClientImpl phoneNumberManagementClient = spyHelper.phoneNumberAdminClientArg.getValue(); int expectedPolicyCount = 6 + policies.size(); int lastPolicyIndex = expectedPolicyCount - 1; int customPolicyIndex = 5; assertEquals(expectedPolicyCount, phoneNumberManagementClient.getHttpPipeline().getPolicyCount()); assertEquals(spyHelper.userAgentPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(0)); assertEquals(spyHelper.requestIdPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(1)); assertEquals(spyHelper.authenticationPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(3)); assertEquals(spyHelper.cookiePolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(4)); assertEquals(spyHelper.httpLoggingPolicyRef.get(), phoneNumberManagementClient.getHttpPipeline().getPolicy(lastPolicyIndex)); for (HttpPipelinePolicy policy : policies) { assertEquals(policy, phoneNumberManagementClient.getHttpPipeline().getPolicy(customPolicyIndex)); customPolicyIndex++; } } private class ClientBuilderSpyHelper { final PhoneNumbersClientBuilder clientBuilder; final AtomicReference<HmacAuthenticationPolicy> authenticationPolicyRef = new AtomicReference<>(); final AtomicReference<UserAgentPolicy> userAgentPolicyRef = new AtomicReference<>(); final AtomicReference<RequestIdPolicy> requestIdPolicyRef = new AtomicReference<>(); final AtomicReference<RetryPolicy> retryPolicyRef = new AtomicReference<>(); final AtomicReference<CookiePolicy> cookiePolicyRef = new AtomicReference<>(); final AtomicReference<HttpLoggingPolicy> httpLoggingPolicyRef = new AtomicReference<>(); final AtomicReference<HttpLogOptions> defaultHttpLogOptionsRef = new AtomicReference<>(); final ArgumentCaptor<PhoneNumberAdminClientImpl> phoneNumberAdminClientArg = ArgumentCaptor.forClass(PhoneNumberAdminClientImpl.class); final ArgumentCaptor<String> uaPolicyAppIdArg = ArgumentCaptor.forClass(String.class); final ArgumentCaptor<String> uaPolicySdkNameArg = ArgumentCaptor.forClass(String.class); final ArgumentCaptor<String> uaPolicySdkVersionArg = ArgumentCaptor.forClass(String.class); final ArgumentCaptor<Configuration> uaPolicyConfigArg = ArgumentCaptor.forClass(Configuration.class); final ArgumentCaptor<HttpLogOptions> httpLogOptionsArg = ArgumentCaptor.forClass(HttpLogOptions.class); ClientBuilderSpyHelper(PhoneNumbersClientBuilder clientBuilder) { this.clientBuilder = clientBuilder; this.initializeSpies(); } private void initializeSpies() { Answer<HmacAuthenticationPolicy> createCommunicationClientCredentialPolicy = (invocation) -> { this.authenticationPolicyRef.set((HmacAuthenticationPolicy) invocation.callRealMethod()); return this.authenticationPolicyRef.get(); }; doAnswer(createCommunicationClientCredentialPolicy).when(this.clientBuilder).createAuthenticationPolicy(); Answer<UserAgentPolicy> createUserAgentPolicy = (invocation) -> { this.userAgentPolicyRef.set(mock(UserAgentPolicy.class)); return 
this.userAgentPolicyRef.get(); }; doAnswer(createUserAgentPolicy).when(this.clientBuilder).createUserAgentPolicy(any(), any(), any(), any()); Answer<RequestIdPolicy> createRequestIdPolicy = (invocation) -> { this.requestIdPolicyRef.set(mock(RequestIdPolicy.class)); return this.requestIdPolicyRef.get(); }; doAnswer(createRequestIdPolicy).when(this.clientBuilder).createRequestIdPolicy(); Answer<CookiePolicy> createCookiePolicy = (invocation) -> { this.cookiePolicyRef.set((CookiePolicy) invocation.callRealMethod()); return this.cookiePolicyRef.get(); }; doAnswer(createCookiePolicy).when(this.clientBuilder).createCookiePolicy(); Answer<HttpLoggingPolicy> createHttpLoggingPolicy = (invocation) -> { this.httpLoggingPolicyRef.set((HttpLoggingPolicy) invocation.callRealMethod()); return this.httpLoggingPolicyRef.get(); }; doAnswer(createHttpLoggingPolicy).when(this.clientBuilder).createHttpLoggingPolicy(any()); Answer<HttpLogOptions> createDefaultHttpLogOptions = (invocation) -> { this.defaultHttpLogOptionsRef.set((HttpLogOptions) invocation.callRealMethod()); return this.defaultHttpLogOptionsRef.get(); }; doAnswer(createDefaultHttpLogOptions).when(this.clientBuilder).createDefaultHttpLogOptions(); } void capturePhoneNumberAdminClientImpl() { verify(this.clientBuilder, times(1)) .createPhoneNumberAsyncClient(this.phoneNumberAdminClientArg.capture()); } void captureHttpPipelineSettings() { verify(this.clientBuilder, times(1)) .createAuthenticationPolicy(); verify(this.clientBuilder, times(1)) .createUserAgentPolicy( this.uaPolicyAppIdArg.capture(), this.uaPolicySdkNameArg.capture(), this.uaPolicySdkVersionArg.capture(), this.uaPolicyConfigArg.capture()); verify(this.clientBuilder, times(1)) .createHttpLoggingPolicy(this.httpLogOptionsArg.capture()); } } }
Now, do we have tests for all four bulk overloads (2 for async and 2 for sync)?
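For reference, covering all four would look roughly like the minimal sketch below, mirroring the sync tests further down. The two list-based sync overloads are taken from this snippet; the async container field name (cosmosEncryptionAsyncContainer) and its Flux-based executeBulkOperations signatures are assumptions here, not something this snippet confirms.

// Sketch only: one operation pushed through all four executeBulkOperations overloads.
// Upsert keeps the sketch idempotent when the same operation list is replayed.
List<CosmosItemOperation> ops = new ArrayList<>();
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
ops.add(CosmosBulkOperations.getUpsertItemOperation(pojo, new PartitionKey(pojo.getMypk())));
// Sync overload 1 (no options); materialize the lazy Iterable so the operations actually execute.
Lists.newArrayList(this.cosmosEncryptionContainer.executeBulkOperations(ops));
// Sync overload 2 (with execution options).
Lists.newArrayList(this.cosmosEncryptionContainer.executeBulkOperations(ops, new CosmosBulkExecutionOptions()));
// Async overload 1 (assumed Flux-based counterpart on the async container).
cosmosEncryptionAsyncContainer.executeBulkOperations(Flux.fromIterable(ops)).blockLast();
// Async overload 2 (assumed; with execution options).
cosmosEncryptionAsyncContainer.executeBulkOperations(Flux.fromIterable(ops), new CosmosBulkExecutionOptions()).blockLast();

The sync tests below already exercise both list-based variants (bulkExecution_createItem without options, bulkExecution_upsertItem with options); the async pair would live in EncryptionAsyncApiCrudTest.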
public void crudOnDifferentOverload() { List<EncryptionPojo> actualProperties = new ArrayList<>(); EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); actualProperties.add(properties); properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse1 = this.cosmosEncryptionContainer.createItem(properties, new CosmosItemRequestOptions()); assertThat(itemResponse1.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem1 = itemResponse1.getItem(); validateResponse(properties, responseItem1); actualProperties.add(properties); properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> upsertResponse1 = this.cosmosEncryptionContainer.upsertItem(properties); assertThat(upsertResponse1.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem2 = upsertResponse1.getItem(); validateResponse(properties, responseItem2); actualProperties.add(properties); properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> upsertResponse2 = this.cosmosEncryptionContainer.upsertItem(properties, new CosmosItemRequestOptions()); assertThat(upsertResponse2.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem3 = upsertResponse2.getItem(); validateResponse(properties, responseItem3); actualProperties.add(properties); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(), new PartitionKey(actualProperties.get(0).getMypk()), EncryptionPojo.class).getItem(); validateResponse(actualProperties.get(0), readItem); String query = String.format("SELECT * from c where c.id = '%s'", actualProperties.get(1).getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem); } } CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<EncryptionPojo> feedResponseIterator2 = this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions1, EncryptionPojo.class); List<EncryptionPojo> feedResponse2 = new ArrayList<>(); feedResponseIterator2.iterator().forEachRemaining(pojo -> { feedResponse2.add(pojo); }); assertThat(feedResponse2.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse2) { if (pojo.getId().equals(properties.getId())) { EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem); } } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); CosmosItemResponse<EncryptionPojo> replaceResponse = this.cosmosEncryptionContainer.replaceItem(actualProperties.get(2), actualProperties.get(2).getId(), new PartitionKey(actualProperties.get(2).getMypk()), requestOptions); 
assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0); responseItem = replaceResponse.getItem(); validateResponse(actualProperties.get(2), responseItem); CosmosItemResponse<?> deleteResponse1 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(1).getId(), new PartitionKey(actualProperties.get(1).getMypk()), new CosmosItemRequestOptions()); assertThat(deleteResponse1.getStatusCode()).isEqualTo(204); CosmosItemResponse<?> deleteResponse2 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(2), new CosmosItemRequestOptions()); assertThat(deleteResponse2.getStatusCode()).isEqualTo(204); CosmosItemResponse<?> deleteResponse3 = this.cosmosEncryptionContainer.deleteAllItemsByPartitionKey(new PartitionKey(actualProperties.get(3).getMypk()), new CosmosItemRequestOptions()); assertThat(deleteResponse3.getStatusCode()).isEqualTo(200); }
EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(),
public void crudOnDifferentOverload() { List<EncryptionPojo> actualProperties = new ArrayList<>(); EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); actualProperties.add(properties); properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse1 = this.cosmosEncryptionContainer.createItem(properties, new CosmosItemRequestOptions()); assertThat(itemResponse1.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem1 = itemResponse1.getItem(); validateResponse(properties, responseItem1); actualProperties.add(properties); properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> upsertResponse1 = this.cosmosEncryptionContainer.upsertItem(properties); assertThat(upsertResponse1.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem2 = upsertResponse1.getItem(); validateResponse(properties, responseItem2); actualProperties.add(properties); properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> upsertResponse2 = this.cosmosEncryptionContainer.upsertItem(properties, new CosmosItemRequestOptions()); assertThat(upsertResponse2.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem3 = upsertResponse2.getItem(); validateResponse(properties, responseItem3); actualProperties.add(properties); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(actualProperties.get(0).getId(), new PartitionKey(actualProperties.get(0).getMypk()), EncryptionPojo.class).getItem(); validateResponse(actualProperties.get(0), readItem); String query = String.format("SELECT * from c where c.id = '%s'", actualProperties.get(1).getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem); } } CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<EncryptionPojo> feedResponseIterator2 = this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions1, EncryptionPojo.class); List<EncryptionPojo> feedResponse2 = new ArrayList<>(); feedResponseIterator2.iterator().forEachRemaining(pojo -> { feedResponse2.add(pojo); }); assertThat(feedResponse2.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse2) { if (pojo.getId().equals(properties.getId())) { EncryptionAsyncApiCrudTest.validateResponse(pojo, responseItem); } } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); CosmosItemResponse<EncryptionPojo> replaceResponse = this.cosmosEncryptionContainer.replaceItem(actualProperties.get(2), actualProperties.get(2).getId(), new PartitionKey(actualProperties.get(2).getMypk()), requestOptions); 
assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0); responseItem = replaceResponse.getItem(); validateResponse(actualProperties.get(2), responseItem); CosmosItemResponse<?> deleteResponse1 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(1).getId(), new PartitionKey(actualProperties.get(1).getMypk()), new CosmosItemRequestOptions()); assertThat(deleteResponse1.getStatusCode()).isEqualTo(204); CosmosItemResponse<?> deleteResponse2 = this.cosmosEncryptionContainer.deleteItem(actualProperties.get(2), new CosmosItemRequestOptions()); assertThat(deleteResponse2.getStatusCode()).isEqualTo(204); CosmosItemResponse<?> deleteResponse3 = this.cosmosEncryptionContainer.deleteAllItemsByPartitionKey(new PartitionKey(actualProperties.get(3).getMypk()), new CosmosItemRequestOptions()); assertThat(deleteResponse3.getStatusCode()).isEqualTo(200); }
class EncryptionSyncApiCrudTest extends TestSuiteBase { private CosmosClient client; private CosmosEncryptionClient cosmosEncryptionClient; private CosmosEncryptionContainer cosmosEncryptionContainer; @Factory(dataProvider = "clientBuildersWithSessionConsistency") public EncryptionSyncApiCrudTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider(); this.cosmosEncryptionClient = CosmosEncryptionClient.createCosmosEncryptionClient(this.client, encryptionKeyStoreProvider); this.cosmosEncryptionContainer = getSharedSyncEncryptionContainer(this.cosmosEncryptionClient); } @AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void createItemEncrypt_readItemDecrypt() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).getItem(); validateResponse(properties, readItem); properties = getItem(UUID.randomUUID().toString()); String longString = ""; for (int i = 0; i < 10000; i++) { longString += "a"; } properties.setSensitiveString(longString); itemResponse = cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void upsertItem_readItem() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.upsertItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).getItem(); validateResponse(properties, readItem); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItems() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * from c where c.id = '%s'", 
properties.getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { validateResponse(pojo, responseItem); } } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsOnEncryptedProperties() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" + " " + "@nonSensitive and c.sensitiveLong = @sensitiveLong"); SqlQuerySpec querySpec = new SqlQuerySpec(query); SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive()); querySpec.getParameters().add(parameter1); SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString()); SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong()); SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { validateResponse(pojo, responseItem); } } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsOnRandomizedEncryption() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" + " " + "@nonSensitive and c.sensitiveDouble = @sensitiveDouble"); SqlQuerySpec querySpec = new SqlQuerySpec(query); SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive()); querySpec.getParameters().add(parameter1); SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString()); 
SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble()); SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption, cosmosQueryRequestOptions, EncryptionPojo.class); try { List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); fail("Query on randomized parameter should fail"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " + "query because of randomized encryption"); } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsWithContinuationTokenAndPageSize() throws Exception { List<String> actualIds = new ArrayList<>(); EncryptionPojo properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2)); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); String continuationToken = null; int pageSize = 1; int initialDocumentCount = 3; int finalDocumentCount = 0; CosmosPagedIterable<EncryptionPojo> pojoCosmosPagedIterable = this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class); do { Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable = pojoCosmosPagedIterable.iterableByPage(continuationToken, 1); for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) { int resultSize = fr.getResults().size(); assertThat(resultSize).isEqualTo(pageSize); finalDocumentCount += fr.getResults().size(); continuationToken = fr.getContinuationToken(); } } while (continuationToken != null); assertThat(finalDocumentCount).isEqualTo(initialDocumentCount); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void batchExecution() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); EncryptionPojo replacePojo = getItem(itemId); replacePojo.setSensitiveString("ReplacedSensitiveString"); CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId)); cosmosBatch.createItemOperation(createPojo); cosmosBatch.replaceItemOperation(itemId, replacePojo); cosmosBatch.upsertItemOperation(createPojo); cosmosBatch.readItemOperation(itemId); cosmosBatch.deleteItemOperation(itemId); CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch); 
assertThat(batchResponse.getResults().size()).isEqualTo(5); assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo); validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void batchExecutionWithOptionsApi() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); EncryptionPojo replacePojo = getItem(itemId); replacePojo.setSensitiveString("ReplacedSensitiveString"); CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId)); CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions(); cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions); cosmosBatch.replaceItemOperation(itemId, replacePojo, cosmosBatchItemRequestOptions); cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions); cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions); cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions); CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); assertThat(batchResponse.getResults().size()).isEqualTo(5); assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo); validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void patchItem() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionContainer.createItem(createPojo, new PartitionKey(createPojo.getMypk()), new CosmosItemRequestOptions()); int originalSensitiveInt = createPojo.getSensitiveInt(); int newSensitiveInt = originalSensitiveInt + 1; String itemIdToReplace = UUID.randomUUID().toString(); EncryptionPojo nestedEncryptionPojoToReplace = getItem(itemIdToReplace); 
nestedEncryptionPojoToReplace.setSensitiveString("testing"); CosmosPatchOperations cosmosPatchOperations = CosmosPatchOperations.create(); cosmosPatchOperations.add("/sensitiveString", "patched"); cosmosPatchOperations.remove("/sensitiveDouble"); cosmosPatchOperations.replace("/sensitiveInt", newSensitiveInt); cosmosPatchOperations.replace("/sensitiveNestedPojo", nestedEncryptionPojoToReplace); cosmosPatchOperations.set("/sensitiveBoolean", false); CosmosPatchItemRequestOptions options = new CosmosPatchItemRequestOptions(); CosmosItemResponse<EncryptionPojo> response = this.cosmosEncryptionContainer.patchItem( createPojo.getId(), new PartitionKey(createPojo.getMypk()), cosmosPatchOperations, options, EncryptionPojo.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); EncryptionPojo patchedItem = response.getItem(); assertThat(patchedItem).isNotNull(); assertThat(patchedItem.getSensitiveString()).isEqualTo("patched"); assertThat(patchedItem.getSensitiveDouble()).isNull(); assertThat(patchedItem.getSensitiveNestedPojo()).isNotNull(); assertThat(patchedItem.getSensitiveInt()).isEqualTo(newSensitiveInt); assertThat(patchedItem.isSensitiveBoolean()).isEqualTo(false); response = this.cosmosEncryptionContainer.readItem( createPojo.getId(), new PartitionKey(createPojo.getMypk()), options, EncryptionPojo.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); validateResponse(patchedItem, response.getItem()); } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 120; logger.info("Total count of request for this test case: " + countRequest); return countRequest; } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_createItem() { int totalRequest = getTotalRequest(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperationsList.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer. 
executeBulkOperations(cosmosItemOperationsList)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_upsertItem() { int totalRequest = getTotalRequest(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperationsList.add(CosmosBulkOperations.getUpsertItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer. 
executeBulkOperations(cosmosItemOperationsList, new CosmosBulkExecutionOptions())); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_deleteItem() { int totalRequest = Math.min(getTotalRequest(), 20); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> deleteCosmosItemOperations = new ArrayList<>(); for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { EncryptionPojo encryptionPojo = cosmosItemOperation.getItem(); deleteCosmosItemOperations.add(CosmosBulkOperations.getDeleteItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer .executeBulkOperations(deleteCosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_readItem() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); for (int i = 0; i < totalRequest; i++) { 
String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> readCosmosItemOperations = new ArrayList<>(); for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { EncryptionPojo encryptionPojo = cosmosItemOperation.getItem(); readCosmosItemOperations.add(CosmosBulkOperations.getReadItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer .executeBulkOperations(readCosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } private void createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) { List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> createResponseFlux = Lists.newArrayList(this.cosmosEncryptionContainer. executeBulkOperations(cosmosItemOperations)); Set<String> distinctIndex = new HashSet<>(); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : createResponseFlux) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo encryptionPojo = cosmosBulkItemResponse.getItem(EncryptionPojo.class); distinctIndex.add(encryptionPojo.getId()); } ; assertThat(processedDoc.get()).isEqualTo(cosmosItemOperations.size()); assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size()); } }
class EncryptionSyncApiCrudTest extends TestSuiteBase { private CosmosClient client; private CosmosEncryptionClient cosmosEncryptionClient; private CosmosEncryptionContainer cosmosEncryptionContainer; @Factory(dataProvider = "clientBuildersWithSessionConsistency") public EncryptionSyncApiCrudTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider(); this.cosmosEncryptionClient = CosmosEncryptionClient.createCosmosEncryptionClient(this.client, encryptionKeyStoreProvider); this.cosmosEncryptionContainer = getSharedSyncEncryptionContainer(this.cosmosEncryptionClient); } @AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void createItemEncrypt_readItemDecrypt() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).getItem(); validateResponse(properties, readItem); properties = getItem(UUID.randomUUID().toString()); String longString = ""; for (int i = 0; i < 10000; i++) { longString += "a"; } properties.setSensitiveString(longString); itemResponse = cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void upsertItem_readItem() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.upsertItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); EncryptionPojo readItem = this.cosmosEncryptionContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).getItem(); validateResponse(properties, readItem); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItems() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * from c where c.id = '%s'", 
properties.getId()); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { validateResponse(pojo, responseItem); } } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsOnEncryptedProperties() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" + " " + "@nonSensitive and c.sensitiveLong = @sensitiveLong"); SqlQuerySpec querySpec = new SqlQuerySpec(query); SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive()); querySpec.getParameters().add(parameter1); SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString()); SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong()); SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption, cosmosQueryRequestOptions, EncryptionPojo.class); List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1); for (EncryptionPojo pojo : feedResponse) { if (pojo.getId().equals(properties.getId())) { validateResponse(pojo, responseItem); } } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsOnRandomizedEncryption() { EncryptionPojo properties = getItem(UUID.randomUUID().toString()); CosmosItemResponse<EncryptionPojo> itemResponse = this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); assertThat(itemResponse.getRequestCharge()).isGreaterThan(0); EncryptionPojo responseItem = itemResponse.getItem(); validateResponse(properties, responseItem); String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" + " " + "@nonSensitive and c.sensitiveDouble = @sensitiveDouble"); SqlQuerySpec querySpec = new SqlQuerySpec(query); SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive()); querySpec.getParameters().add(parameter1); SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString()); 
SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble()); SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2); sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<EncryptionPojo> feedResponseIterator = this.cosmosEncryptionContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption, cosmosQueryRequestOptions, EncryptionPojo.class); try { List<EncryptionPojo> feedResponse = new ArrayList<>(); feedResponseIterator.iterator().forEachRemaining(pojo -> { feedResponse.add(pojo); }); fail("Query on randomized parameter should fail"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " + "query because of randomized encryption"); } } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void queryItemsWithContinuationTokenAndPageSize() throws Exception { List<String> actualIds = new ArrayList<>(); EncryptionPojo properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); properties = getItem(UUID.randomUUID().toString()); this.cosmosEncryptionContainer.createItem(properties, new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()); actualIds.add(properties.getId()); String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0), actualIds.get(1), actualIds.get(2)); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); String continuationToken = null; int pageSize = 1; int initialDocumentCount = 3; int finalDocumentCount = 0; CosmosPagedIterable<EncryptionPojo> pojoCosmosPagedIterable = this.cosmosEncryptionContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class); do { Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable = pojoCosmosPagedIterable.iterableByPage(continuationToken, 1); for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) { int resultSize = fr.getResults().size(); assertThat(resultSize).isEqualTo(pageSize); finalDocumentCount += fr.getResults().size(); continuationToken = fr.getContinuationToken(); } } while (continuationToken != null); assertThat(finalDocumentCount).isEqualTo(initialDocumentCount); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void batchExecution() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); EncryptionPojo replacePojo = getItem(itemId); replacePojo.setSensitiveString("ReplacedSensitiveString"); CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId)); cosmosBatch.createItemOperation(createPojo); cosmosBatch.replaceItemOperation(itemId, replacePojo); cosmosBatch.upsertItemOperation(createPojo); cosmosBatch.readItemOperation(itemId); cosmosBatch.deleteItemOperation(itemId); CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch); 
assertThat(batchResponse.getResults().size()).isEqualTo(5); assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo); validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void batchExecutionWithOptionsApi() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); EncryptionPojo replacePojo = getItem(itemId); replacePojo.setSensitiveString("ReplacedSensitiveString"); CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId)); CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions(); cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions); cosmosBatch.replaceItemOperation(itemId, replacePojo, cosmosBatchItemRequestOptions); cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions); cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions); cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions); CosmosBatchResponse batchResponse = this.cosmosEncryptionContainer.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions()); assertThat(batchResponse.getResults().size()).isEqualTo(5); assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo); validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo); validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void patchItem() { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionContainer.createItem(createPojo, new PartitionKey(createPojo.getMypk()), new CosmosItemRequestOptions()); int originalSensitiveInt = createPojo.getSensitiveInt(); int newSensitiveInt = originalSensitiveInt + 1; String itemIdToReplace = UUID.randomUUID().toString(); EncryptionPojo nestedEncryptionPojoToReplace = getItem(itemIdToReplace); 
nestedEncryptionPojoToReplace.setSensitiveString("testing"); CosmosPatchOperations cosmosPatchOperations = CosmosPatchOperations.create(); cosmosPatchOperations.add("/sensitiveString", "patched"); cosmosPatchOperations.remove("/sensitiveDouble"); cosmosPatchOperations.replace("/sensitiveInt", newSensitiveInt); cosmosPatchOperations.replace("/sensitiveNestedPojo", nestedEncryptionPojoToReplace); cosmosPatchOperations.set("/sensitiveBoolean", false); CosmosPatchItemRequestOptions options = new CosmosPatchItemRequestOptions(); CosmosItemResponse<EncryptionPojo> response = this.cosmosEncryptionContainer.patchItem( createPojo.getId(), new PartitionKey(createPojo.getMypk()), cosmosPatchOperations, options, EncryptionPojo.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); EncryptionPojo patchedItem = response.getItem(); assertThat(patchedItem).isNotNull(); assertThat(patchedItem.getSensitiveString()).isEqualTo("patched"); assertThat(patchedItem.getSensitiveDouble()).isNull(); assertThat(patchedItem.getSensitiveNestedPojo()).isNotNull(); assertThat(patchedItem.getSensitiveInt()).isEqualTo(newSensitiveInt); assertThat(patchedItem.isSensitiveBoolean()).isEqualTo(false); response = this.cosmosEncryptionContainer.readItem( createPojo.getId(), new PartitionKey(createPojo.getMypk()), options, EncryptionPojo.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); validateResponse(patchedItem, response.getItem()); } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 120; logger.info("Total count of request for this test case: " + countRequest); return countRequest; } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_createItem() { int totalRequest = getTotalRequest(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperationsList.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer. 
executeBulkOperations(cosmosItemOperationsList)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_upsertItem() { int totalRequest = getTotalRequest(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); List<CosmosItemOperation> cosmosItemOperationsList = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperationsList.add(CosmosBulkOperations.getUpsertItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer. 
executeBulkOperations(cosmosItemOperationsList, new CosmosBulkExecutionOptions())); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_deleteItem() { int totalRequest = Math.min(getTotalRequest(), 20); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> deleteCosmosItemOperations = new ArrayList<>(); for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { EncryptionPojo encryptionPojo = cosmosItemOperation.getItem(); deleteCosmosItemOperations.add(CosmosBulkOperations.getDeleteItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer .executeBulkOperations(deleteCosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"encryption"}, timeOut = TIMEOUT) public void bulkExecution_readItem() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); Map<String, EncryptionPojo> idToItemMap = new HashMap<>(); for (int i = 0; i < totalRequest; i++) { 
String itemId = UUID.randomUUID().toString(); EncryptionPojo createPojo = getItem(itemId); idToItemMap.put(itemId, createPojo); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(createPojo, new PartitionKey(createPojo.getMypk()))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> readCosmosItemOperations = new ArrayList<>(); for (CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { EncryptionPojo encryptionPojo = cosmosItemOperation.getItem(); readCosmosItemOperations.add(CosmosBulkOperations.getReadItemOperation(encryptionPojo.getId(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> bulkResponse = Lists.newArrayList(this.cosmosEncryptionContainer .executeBulkOperations(readCosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : bulkResponse) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo item = cosmosBulkItemResponse.getItem(EncryptionPojo.class); validateResponse(item, idToItemMap.get(item.getId())); } ; assertThat(processedDoc.get()).isEqualTo(totalRequest); } private void createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) { List<CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest>> createResponseFlux = Lists.newArrayList(this.cosmosEncryptionContainer. executeBulkOperations(cosmosItemOperations)); Set<String> distinctIndex = new HashSet<>(); AtomicInteger processedDoc = new AtomicInteger(0); for (CosmosBulkOperationResponse<EncryptionAsyncApiCrudTest> cosmosBulkOperationResponse : createResponseFlux) { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); if (cosmosBulkOperationResponse.getException() != null) { logger.error("Bulk operation failed", cosmosBulkOperationResponse.getException()); fail(cosmosBulkOperationResponse.getException().toString()); } assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); EncryptionPojo encryptionPojo = cosmosBulkItemResponse.getItem(EncryptionPojo.class); distinctIndex.add(encryptionPojo.getId()); } ; assertThat(processedDoc.get()).isEqualTo(cosmosItemOperations.size()); assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size()); } }
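The async and sync test classes above repeat the same bulk pattern: build a list of CosmosItemOperation instances, hand it to executeBulkOperations, and walk the per-item responses. A minimal sketch of that pattern against a plain CosmosContainer, assuming a hypothetical Doc POJO with id/mypk fields; this is a condensed illustration, not the tests' actual harness:

```java
import com.azure.cosmos.CosmosContainer;
import com.azure.cosmos.models.CosmosBulkItemResponse;
import com.azure.cosmos.models.CosmosBulkOperationResponse;
import com.azure.cosmos.models.CosmosBulkOperations;
import com.azure.cosmos.models.CosmosItemOperation;
import com.azure.cosmos.models.PartitionKey;

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

public final class BulkCreateSketch {
    // Hypothetical payload type standing in for EncryptionPojo.
    public static final class Doc {
        public String id;
        public String mypk;
    }

    public static void runBulkCreate(CosmosContainer container) {
        List<CosmosItemOperation> operations = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            Doc doc = new Doc();
            doc.id = UUID.randomUUID().toString();
            doc.mypk = doc.id;
            operations.add(CosmosBulkOperations.getCreateItemOperation(doc, new PartitionKey(doc.mypk)));
        }
        // The sync API returns an Iterable; each element pairs an operation with its response.
        for (CosmosBulkOperationResponse<Object> response : container.executeBulkOperations(operations)) {
            if (response.getException() != null) {
                throw new IllegalStateException("bulk operation failed", response.getException());
            }
            CosmosBulkItemResponse itemResponse = response.getResponse();
            System.out.println(itemResponse.getStatusCode() + " RU=" + itemResponse.getRequestCharge());
        }
    }
}
```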
Why do we remove the beta tag here?
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) {
    return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions());
}
}
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) {
    return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions());
}
class type. * @return a {@link CosmosPagedFlux}
class type. * @return a {@link CosmosPagedFlux}
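In azure-cosmos, "the beta tag" refers to the @Beta annotation that marks public preview APIs and is deleted once an API is declared generally available. A hedged before/after sketch of what that removal looks like on this method; the SinceVersion value below is illustrative, not taken from the actual change:

```java
// Before GA (illustrative version value): the preview marker warns callers the API may change.
@Beta(value = Beta.SinceVersion.V4_19_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) {
    return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions());
}

// After GA: only the annotation is dropped; the signature and body are unchanged.
public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch) {
    return this.executeCosmosBatch(cosmosBatch, new CosmosBatchRequestOptions());
}
```

Removing the tag is the conventional signal that an API has graduated from public preview to GA.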
Why do we need the `* 2` here?
public void queryDiagnosticsOnOrderBy() {
    String containerId = "testcontainer";
    cosmosAsyncDatabase.createContainer(containerId, "/mypk",
        ThroughputProperties.createManualThroughput(40000)).block();
    CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    testcontainer.createItem(getInternalObjectNode()).block();
    options.setMaxDegreeOfParallelism(-1);
    String query = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options,
        InternalObjectNode.class);
    AtomicInteger counterPkRid = new AtomicInteger();
    AtomicInteger counterPartitionKeyRangeId = new AtomicInteger();
    cosmosPagedFlux.byPage().flatMap(feedResponse -> {
        String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
        Pattern pattern = Pattern.compile("\"partitionKeyRangeId\":\"");
        Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            counterPartitionKeyRangeId.incrementAndGet();
        }
        pattern = Pattern.compile("pkrId:");
        matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            counterPkRid.incrementAndGet();
        }
        return Flux.just(feedResponse);
    }).blockLast();
    assertThat(counterPkRid.get() * 2).isEqualTo(counterPartitionKeyRangeId.get());
    deleteCollection(testcontainer);
}
assertThat(counterPkRid.get() * 2).isEqualTo(counterPartitionKeyRangeId.get());
public void queryDiagnosticsOnOrderBy() {
    String containerId = "testcontainer";
    cosmosAsyncDatabase.createContainer(containerId, "/mypk",
        ThroughputProperties.createManualThroughput(40000)).block();
    CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setConsistencyLevel(ConsistencyLevel.EVENTUAL);
    testcontainer.createItem(getInternalObjectNode()).block();
    options.setMaxDegreeOfParallelism(-1);
    String query = "SELECT * from c ORDER BY c._ts DESC";
    CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options,
        InternalObjectNode.class);
    Set<String> partitionKeyRangeIds = new HashSet<>();
    Set<String> pkRids = new HashSet<>();
    cosmosPagedFlux.byPage().flatMap(feedResponse -> {
        String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString();
        Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)");
        Matcher matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            String group = matcher.group(2);
            partitionKeyRangeIds.add(group);
        }
        pattern = Pattern.compile("(pkrId:)(\\d)");
        matcher = pattern.matcher(cosmosDiagnosticsString);
        while (matcher.find()) {
            String group = matcher.group(2);
            pkRids.add(group);
        }
        return Flux.just(feedResponse);
    }).blockLast();
    assertThat(pkRids).isNotEmpty();
    assertThat(pkRids).isEqualTo(partitionKeyRangeIds);
    deleteCollection(testcontainer);
}
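The fix above answers the reviewer's question by removing the need for `* 2` entirely: instead of counting raw occurrences (which assumed each `partitionKeyRangeId` string appears exactly twice per `pkrId` in the diagnostics), it captures the actual IDs with regex groups and compares the resulting sets. A minimal, self-contained sketch of that capture pattern, using a hypothetical diagnostics fragment rather than real CosmosDiagnostics output:

```java
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PkRangeIdCaptureSketch {
    public static void main(String[] args) {
        // Hypothetical fragment; real CosmosDiagnostics JSON is much larger and noisier.
        String diagnostics =
            "{\"partitionKeyRangeId\":\"0\"} pkrId:0 {\"partitionKeyRangeId\":\"0\"} pkrId:1 {\"partitionKeyRangeId\":\"1\"}";

        // Group 2 captures only the digit following the JSON key, as in the updated test.
        Set<String> partitionKeyRangeIds = new HashSet<>();
        Matcher matcher = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)").matcher(diagnostics);
        while (matcher.find()) {
            partitionKeyRangeIds.add(matcher.group(2));
        }

        Set<String> pkRids = new HashSet<>();
        matcher = Pattern.compile("(pkrId:)(\\d)").matcher(diagnostics);
        while (matcher.find()) {
            pkRids.add(matcher.group(2));
        }

        // Sets make the comparison independent of how many times each range is logged.
        System.out.println(pkRids.equals(partitionKeyRangeIds)); // true for this fragment
    }
}
```

Comparing sets is the more robust assertion: it still passes if the diagnostics format ever logs a range once, twice, or more per request, as long as the same partition key ranges show up under both labels.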
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; 
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) 
.getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode 
internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if 
(client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult")).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2";
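/* The next createItem call supplies an explicit PartitionKey, so the SDK does not have to serialize the document to extract the partition key; the assertions that follow therefore expect PARTITION_KEY_FETCH_SERIALIZATION to be absent, and ITEM_DESERIALIZATION to appear only once getItem() deserializes the payload. */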
testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); 
validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean 
hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(),
InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final
String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
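The reflection helpers above (getStoreResponseStatistics, clearStoreResponseStatistics, validateRegionContacted) all follow the same pattern: look a private field up by name, force accessibility, then read or replace its value. A minimal self-contained sketch of that pattern, using a made-up Holder class instead of the real SDK types:

import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for an SDK class with private state, e.g.
// ClientSideRequestStatistics and its supplementalResponseStatisticsList.
class Holder {
    private List<String> entries = new ArrayList<>();
}

public class ReflectionSketch {
    public static void main(String[] args) throws Exception {
        Holder holder = new Holder();
        // Same steps as getStoreResponseStatistics: getDeclaredField by name,
        // setAccessible(true), then read the current value.
        Field field = Holder.class.getDeclaredField("entries");
        field.setAccessible(true);
        @SuppressWarnings("unchecked")
        List<String> entries = (List<String>) field.get(holder);
        entries.add("recorded");
        System.out.println(entries.size()); // prints 1
        // Same step as clearStoreResponseStatistics: overwrite the field.
        field.set(holder, new ArrayList<String>());
    }
}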
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; 
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) 
.getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode 
internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if
(client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult")).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2";
testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); 
validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean 
hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(),
InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final 
String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
I wonder whether we should check the unique values of the pkRangeId instead, because retries can sometimes happen and the test may fail because of them.
public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options, InternalObjectNode.class); AtomicInteger counterPkRid = new AtomicInteger(); AtomicInteger counterPartitionKeyRangeId = new AtomicInteger(); cosmosPagedFlux.byPage().flatMap(feedResponse -> { String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString(); Pattern pattern = Pattern.compile("\"partitionKeyRangeId\":\""); Matcher matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { counterPartitionKeyRangeId.incrementAndGet(); } pattern = Pattern.compile("pkrId:"); matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { counterPkRid.incrementAndGet(); } return Flux.just(feedResponse); }).blockLast(); assertThat(counterPkRid.get() * 2).isEqualTo(counterPartitionKeyRangeId.get()); deleteCollection(testcontainer); }
Pattern pattern = Pattern.compile("\"partitionKeyRangeId\":\"");
public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options, InternalObjectNode.class); Set<String> partitionKeyRangeIds = new HashSet<>(); Set<String> pkRids = new HashSet<>(); cosmosPagedFlux.byPage().flatMap(feedResponse -> { String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString(); Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)"); Matcher matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); partitionKeyRangeIds.add(group); } pattern = Pattern.compile("(pkrId:)(\\d)"); matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); pkRids.add(group); } return Flux.just(feedResponse); }).blockLast(); assertThat(pkRids).isNotEmpty(); assertThat(pkRids).isEqualTo(partitionKeyRangeIds); deleteCollection(testcontainer); }
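For clarity, here is a minimal, self-contained sketch of the retry-tolerant check the revised method performs. The `PkRangeIdCheckSketch` class name and the sample diagnostics string are illustrative assumptions, not part of the SDK or the test suite; only the two regex patterns are taken from the method above.

```java
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative only: the class name and sample diagnostics string are assumptions
// made for demonstration; the two regex patterns come from queryDiagnosticsOnOrderBy.
public class PkRangeIdCheckSketch {
    public static void main(String[] args) {
        // A retried request repeats "partitionKeyRangeId":"0" in the diagnostics,
        // so raw occurrence counts drift, but the set of distinct ids does not.
        String diagnostics = "{\"partitionKeyRangeId\":\"0\"} pkrId:0 "
            + "{\"partitionKeyRangeId\":\"0\"} pkrId:1 {\"partitionKeyRangeId\":\"1\"}";

        Set<String> partitionKeyRangeIds = new HashSet<>();
        Matcher matcher = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)").matcher(diagnostics);
        while (matcher.find()) {
            partitionKeyRangeIds.add(matcher.group(2));
        }

        Set<String> pkRids = new HashSet<>();
        matcher = Pattern.compile("(pkrId:)(\\d)").matcher(diagnostics);
        while (matcher.find()) {
            pkRids.add(matcher.group(2));
        }

        // Both sets are {"0", "1"} even though "partitionKeyRangeId" occurs three times.
        System.out.println(partitionKeyRangeIds.equals(pkRids)); // prints: true
    }
}
```

Comparing sets rather than asserting `counterPkRid * 2 == counterPartitionKeyRangeId` keeps the assertion stable when a retry adds an extra diagnostics entry for a partition key range that was already contacted.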
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; 
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) 
.getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode 
internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if 
(client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; 
testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); 
validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean 
hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), 
InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final 
String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = "readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnostics() throws Exception { CosmosClient testGatewayClient = null; try { testGatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosContainer container = testGatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); 
CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testGatewayClient.asyncClient()); isValidJSON(diagnostics); } finally { if (testGatewayClient != null) { testGatewayClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.container.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.container.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + 
Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { CosmosClient testDirectClient = null; try { testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), testDirectClient.asyncClient()); isValidJSON(diagnostics); try { cosmosContainer.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); validateTransportRequestTimelineDirect(e.getDiagnostics().toString()); } } finally { if (testDirectClient != null) { testDirectClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; 
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) 
.getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean groupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || groupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { groupByFirstResponse = false; } } } } private void validateDirectModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosClient testDirectClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .gatewayMode() .buildClient(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosContainer cosmosContainer = testDirectClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean groupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = cosmosContainer .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || groupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); 
validateGatewayModeQueryDiagnostics(queryDiagnostics); if (query.contains("group by")) { groupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = container.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.container .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode 
internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); createResponse = container.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), client.asyncClient()); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if 
(client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext(),null); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.container.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; 
testItem.mypk = "TestPk"; itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdStatistics() throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode() .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); 
validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); boolean hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult.get("channelTaskQueueSize").asInt(-1)).isGreaterThan(0); assertThat(storeResult.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold) .isBeforeOrEqualTo(afterOperation2Threshold); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean 
hasPayload = storeResult.get("exception").isNull(); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), 
InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":null"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"errorMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMicroSec\""); } public void isValidJSON(final 
String json) { try { final JsonParser parser = new ObjectMapper().createParser(json); while (parser.nextToken() != null) { } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
For reviewers: I'm not sure whether the approach of calculating the size of the described type is correct. The serialization of the message works well. If anything is wrong, please let me know and I will change it.
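To make the question above concrete, here is a minimal sketch of the estimate the described-type branch below performs, factored into a standalone helper. `DescribedTypeSizeEstimator` and `estimateSize` are hypothetical names introduced only for illustration; the byte-count conventions (two bytes per descriptor character, one byte per character of a String payload, `Long.BYTES` for the epoch-millis and nanos payloads) are copied from the method body, not derived from the AMQP encoding rules, which is exactly the uncertainty the comment raises.

```java
import org.apache.qpid.proton.amqp.DescribedType;
import org.apache.qpid.proton.amqp.Symbol;

// Hypothetical helper mirroring the sizeof(...) branch under review.
final class DescribedTypeSizeEstimator {
    private DescribedTypeSizeEstimator() {
    }

    // Estimates the encoded size of a described type whose descriptor is a Symbol
    // and whose described value is either a String (URI case) or a Long
    // (OffsetDateTime epoch-millis / Duration nanos cases).
    static int estimateSize(DescribedType describedType) {
        // Descriptor characters are counted at two bytes each, matching the
        // "length() << 1" convention used for Symbols elsewhere in sizeof(...).
        int size = ((Symbol) describedType.getDescriptor()).length() << 1;
        Object described = describedType.getDescribed();
        if (described instanceof String) {
            // Note the asymmetry: the payload String is counted at one byte per
            // character here, while a top-level String in sizeof(...) is counted
            // at two bytes per character via "length() << 1".
            size += ((String) described).length();
        } else if (described instanceof Long) {
            size += Long.BYTES;
        }
        return size;
    }
}
```

That asymmetry may be worth confirming with the reviewers: if the two-bytes-per-character convention is meant to be uniform, the described branch could undercount String payloads by half.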
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; int size = ((Symbol) describedType.getDescriptor()).length() << 1; if (describedType.getDescribed() instanceof String) { size += ((String) describedType.getDescribed()).length(); } else if (describedType.getDescribed() instanceof Long) { size += Long.BYTES; } return size; } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
}
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
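The revised body above delegates the whole estimate to the type itself via `describedType.size()`. Below is a minimal sketch of what that hook could look like, assuming it simply carries the inline arithmetic over onto the type; the real `ServiceBusDescribedType` may differ, and the class and field names here are assumptions for illustration.

```java
import org.apache.qpid.proton.amqp.DescribedType;
import org.apache.qpid.proton.amqp.Symbol;

// Hypothetical stand-in for ServiceBusDescribedType, showing only the size()
// hook that the refactored sizeof(...) relies on.
class ServiceBusDescribedTypeSketch implements DescribedType {
    private final Symbol descriptor;
    private final Object described;

    ServiceBusDescribedTypeSketch(Symbol descriptor, Object described) {
        this.descriptor = descriptor;
        this.described = described;
    }

    @Override
    public Object getDescriptor() {
        return descriptor;
    }

    @Override
    public Object getDescribed() {
        return described;
    }

    // Same arithmetic as the inline branch that was removed: descriptor at two
    // bytes per character plus the String-length or Long.BYTES payload estimate.
    public int size() {
        int size = descriptor.length() << 1;
        if (described instanceof String) {
            size += ((String) described).length();
        } else if (described instanceof Long) {
            size += Long.BYTES;
        }
        return size;
    }
}
```

Moving the estimate onto the type keeps `sizeof(...)` free of per-type encoding details, so subclasses for the URI, OffsetDateTime, and Duration payloads can each own (and, if needed, correct) their own size calculation without touching the serializer.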
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } 
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { 
@SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = 
amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), 
brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. */ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> 
deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = 
amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
`<< 1` or `* 2`? Is there any meaning to the `<<` here beyond doubling? If not, `* 2` reads more clearly.
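For context: in Java, `x << 1` is exactly `x * 2` for the non-negative lengths involved here, and the doubling reflects that `String.length()` and `Symbol.length()` count UTF-16 code units, each of which occupies 2 bytes. A minimal sketch of the equivalence (the class and method names are hypothetical, for illustration only):

```java
// Minimal sketch: `length() << 1` doubles the length because Java strings
// are UTF-16 internally (2 bytes per code unit); `<< 1` is equivalent to `* 2`.
final class SizeEstimateSketch {
    // Hypothetical helper, used only to illustrate the equivalence.
    static int estimatedStringBytes(String s) {
        int shifted = s.length() << 1; // left shift by one bit
        int doubled = s.length() * 2;  // plain multiplication
        assert shifted == doubled;     // identical for non-negative lengths
        return shifted;
    }

    public static void main(String[] args) {
        System.out.println(estimatedStringBytes("lock-token")); // 20
    }
}
```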
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; int size = ((Symbol) describedType.getDescriptor()).length() << 1; if (describedType.getDescribed() instanceof String) { size += ((String) describedType.getDescribed()).length(); } else if (describedType.getDescribed() instanceof Long) { size += Long.BYTES; } return size; } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
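One related observation, assuming the goal of `sizeof` is a UTF-16 byte estimate: in the `ServiceBusDescribedType` branch above, the described `String` value is counted with `length()` alone, while the plain-`String` branch uses `length() << 1`. If that asymmetry is unintended, the branch would presumably be:

```java
// Hedged suggestion: count the described String the same way as plain Strings,
// i.e. doubled for UTF-16, if the undoubled `length()` above is unintentional.
if (describedType.getDescribed() instanceof String) {
    size += ((String) describedType.getDescribed()).length() << 1;
}
```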
for (Object value : map.values()) {
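Minor style note on this loop: `sizeof` walks `map.keySet()` and `map.values()` in two separate passes; a single pass over `entrySet()` is the more idiomatic equivalent and touches each entry only once:

```java
// Equivalent single-pass form of the keySet()/values() loops in sizeof(Map):
for (Map.Entry<?, ?> entry : ((Map<?, ?>) obj).entrySet()) {
    size += sizeof(entry.getKey()) + sizeof(entry.getValue());
}
```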
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { amqpMessage.setApplicationProperties(new ApplicationProperties(convertToDescribedType(brokeredMessage.getApplicationProperties()))); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } 
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } private Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.URI_SYMBOL, ((URI) value).toString())); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.OFFSETDATETIME_SYMBOL, ((OffsetDateTime) value).atZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli())); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new ServiceBusDescribedType(ServiceBusConstants.DURATION_SYMBOL, ((Duration) value).toNanos())); } } return propertiesValue; } private Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), URI.create((String) describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), OffsetDateTime.ofInstant(Instant.ofEpochMilli((long) describedType.getDescribed()), ZoneId.systemDefault())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), Duration.ofNanos((long) describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { 
@SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = applicationProperties.getValue(); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(convertToOriginType(propertiesValue)); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = 
amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), 
brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. * @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. */ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> 
deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = 
amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
Should we add a getSize() interface in ServiceBusDescribedType?
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; int size = ((Symbol) describedType.getDescriptor()).length() << 1; if (describedType.getDescribed() instanceof String) { size += ((String) describedType.getDescribed()).length(); } else if (describedType.getDescribed() instanceof Long) { size += Long.BYTES; } return size; } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; int size = ((Symbol) describedType.getDescriptor()).length() << 1; if (describedType.getDescribed() instanceof String) { size += ((String) describedType.getDescribed()).length(); } else if (describedType.getDescribed() instanceof Long) { size += Long.BYTES; } return size; }
private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); }
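The after-version above delegates the size estimate to the described type itself via `describedType.size()`. The body of that method is not part of this record, so the following is only a hedged sketch of what it could look like, assuming it simply absorbs the inline arithmetic from the before-version (two bytes per descriptor character, plus the described value's estimated size); `getDescriptor()`/`getDescribed()` come from proton-j's `DescribedType` contract.

```java
import org.apache.qpid.proton.amqp.DescribedType;
import org.apache.qpid.proton.amqp.Symbol;

// Hypothetical sketch of the size() accessor the refactored sizeof(...) relies
// on; the real ServiceBusDescribedType implementation may differ.
public class DescribedTypeSizeSketch implements DescribedType {
    private final Symbol descriptor;
    private final Object described;

    public DescribedTypeSizeSketch(Symbol descriptor, Object described) {
        this.descriptor = descriptor;
        this.described = described;
    }

    @Override
    public Object getDescriptor() {
        return descriptor;
    }

    @Override
    public Object getDescribed() {
        return described;
    }

    // Mirrors the inline arithmetic from the before-version: two bytes per
    // descriptor character, plus the described value's estimated size.
    public int size() {
        int size = descriptor.length() << 1;
        if (described instanceof String) {
            size += ((String) described).length();
        } else if (described instanceof Long) {
            size += Long.BYTES;
        }
        return size;
    }
}
```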
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to serialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), 
brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * To send application properties containing OffsetDateTime, Duration or URI values on the wire in AMQP, * we need to convert these objects to described types so that AMQP can write the data into the buffer. * @param propertiesValue application properties set by the user, which may contain the specific types mentioned above. * @return Map containing only primitive types and described types. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { propertiesValue.put(entry.getKey(), new UriDescribedType(value)); } else if (value instanceof OffsetDateTime) { propertiesValue.put(entry.getKey(), new OffsetDateTimeDescribedType(value)); } else if (value instanceof Duration) { propertiesValue.put(entry.getKey(), new DurationDescribedType(value)); } } return propertiesValue; } /** * Reverse of {@link #convertToDescribedType(Map)}: converts described types back to their original types. * @param propertiesValue application properties from an AMQP message, which may contain described types. * @return Map without described types. */ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { DescribedType describedType = (DescribedType) value; if (ServiceBusConstants.URI_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), describedToURI(describedType.getDescribed())); } else if (ServiceBusConstants.OFFSETDATETIME_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), describedToOffsetDateTime(describedType.getDescribed())); } else if (ServiceBusConstants.DURATION_SYMBOL.equals(describedType.getDescriptor())) { propertiesValue.put(entry.getKey(), describedToDuration(describedType.getDescribed())); } } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new 
IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = 
amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + applicationPropertiesSize + payloadSize; } /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to serialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. 
Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if (brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), 
brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Converts specific types to described types for sending on the wire. * @param propertiesValue application properties set by the user, which may contain specific types. * @return Map containing only primitive types and described types. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Converts described types back to their original types. * @param propertiesValue application properties from an AMQP message, which may contain described types. * @return Map without described types. */ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw logger.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> 
deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { logger.warning("AMQP response did not contain OK status code. Actual: {}", statusCode); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { logger.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { logger.warning("AMQP response body is not correct instance. Expected: {}. Actual: {}", Map.class, responseBodyMap.getClass()); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { logger.warning("Response body did not contain key: {}", ManagementConstants.MESSAGES); return Collections.emptyList(); } else if (!(messages instanceof Iterable)) { logger.warning("Response body contents is not the correct type. Expected: {}. Actual: {}", Iterable.class, messages.getClass()); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { logger.warning("Message inside iterable of message is not correct type. Expected: {}. 
Actual: {}", Map.class, message.getClass()); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType())); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null")); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = amqpMessage.getProperties(); if (amqpProperties != null) { final String to = 
amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = (Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") }
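The convertToDescribedType/convertToOriginType pair above exists because AMQP has no native encoding for URI, OffsetDateTime or Duration, so each value is wrapped in a proton-j `DescribedType` before encoding and unwrapped on receive. As a self-contained illustration of the wrapping side, here is a hedged sketch of a Duration wrapper in the style of DurationDescribedType; the descriptor name and the 100-nanosecond "tick" unit follow the .NET TimeSpan convention and are assumptions here, not this record's actual implementation.

```java
import java.time.Duration;
import org.apache.qpid.proton.amqp.DescribedType;
import org.apache.qpid.proton.amqp.Symbol;

// Sketch of a described type wrapping java.time.Duration. Descriptor symbol
// and tick conversion are assumptions modeled on the Service Bus wire format.
final class DurationDescribedTypeSketch implements DescribedType {
    private static final Symbol DURATION_SYMBOL = Symbol.valueOf("com.microsoft:timespan");
    private final long ticks;

    DurationDescribedTypeSketch(Duration duration) {
        // .NET TimeSpan ticks are 100-nanosecond units.
        this.ticks = duration.toNanos() / 100;
    }

    @Override
    public Object getDescriptor() {
        return DURATION_SYMBOL;
    }

    @Override
    public Object getDescribed() {
        return ticks;
    }
}
```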
Emit the shutdown signal as soon as the CBS closing function returns, instead of waiting for the CBS node to be completely closed. That way, the ReactorReceiver(s) can be closed in parallel with the CBS node.
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutdownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes.")), emitShutdownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. "))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
emitShutdownSignalOperation.doFinally(signalType ->
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
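The difference between the two versions is when the shutdown signal reaches subscribers: before, the emission was just one more source inside Mono.whenDelayError; after, the signal is emitted synchronously in closeAsync before any close operation is subscribed, so listeners on getShutdownSignals() can begin their own teardown in parallel with the CBS and management node closes. A toy Reactor sketch of that composition, with delays standing in for the real close operations (all names here are illustrative only):

```java
import java.time.Duration;
import reactor.core.publisher.Mono;

public class CloseOrderingSketch {
    public static void main(String[] args) {
        // Emit the shutdown signal first, as the after-version does, so that
        // downstream listeners can begin closing immediately.
        System.out.println("shutdown signal emitted");

        // Stand-ins for the CBS node close and the management node closes.
        Mono<Void> cbsClose = Mono.delay(Duration.ofMillis(200))
            .doOnSubscribe(s -> System.out.println("closing CBS node"))
            .then();
        Mono<Void> managementClose = Mono.delay(Duration.ofMillis(100))
            .doOnSubscribe(s -> System.out.println("closing management nodes"))
            .then();

        // whenDelayError subscribes to both sources eagerly (they run in
        // parallel) and delays any error until all of them terminate.
        Mono.whenDelayError(cbsClose, managementClose)
            .then(Mono.fromRunnable(() -> System.out.println("closing reactor dispatcher")))
            .block();
    }
}
```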
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions#getTryTimeout() operation timeout}. */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Also test with some characters that take 2 or more bytes in UTF-8.
void serializeMessageWithSpecificApplicationProperties() { String contents = "some contents"; String messageId = "messageId"; final ServiceBusMessage message = getServiceBusMessage(contents, messageId); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("duration", Duration.ZERO); specificMap.put("offsetDateTime", OffsetDateTime.now()); message.getApplicationProperties().putAll(specificMap); Message amqpMessage = serializer.serialize(message); assertEquals(specificMap.size(), amqpMessage.getApplicationProperties().getValue().size()); AtomicInteger convertCount = new AtomicInteger(); specificMap.forEach((key, value) -> { Assertions.assertTrue(amqpMessage.getApplicationProperties().getValue().containsKey(key)); if (value instanceof URI) { assertEquals(((URI) value).toString(), ((ServiceBusDescribedType) amqpMessage.getApplicationProperties().getValue().get(key)).getDescribed()); convertCount.getAndIncrement(); } else if (value instanceof Duration) { convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); }
String contents = "some contents";
void serializeMessageWithSpecificApplicationProperties() { String contents = "some contents"; String messageId = "messageId"; final ServiceBusMessage message = getServiceBusMessage(contents, messageId); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("duration", Duration.ZERO); specificMap.put("offsetDateTime", OffsetDateTime.now()); message.getApplicationProperties().putAll(specificMap); Message amqpMessage = serializer.serialize(message); assertEquals(specificMap.size(), amqpMessage.getApplicationProperties().getValue().size()); AtomicInteger convertCount = new AtomicInteger(); specificMap.forEach((key, value) -> { Assertions.assertTrue(amqpMessage.getApplicationProperties().getValue().containsKey(key)); if (value instanceof URI) { assertEquals(((URI) value).toString(), ((ServiceBusDescribedType) amqpMessage.getApplicationProperties().getValue().get(key)).getDescribed()); convertCount.getAndIncrement(); } else if (value instanceof Duration) { convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); }
class ServiceBusMessageSerializerTest { private final ServiceBusMessageSerializer serializer = new ServiceBusMessageSerializer(); @Test void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, ServiceBusMessage.class)); } @Test void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type Message. */ @Test void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test void cannotDeserializeObject() { final org.apache.qpid.proton.message.Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, ServiceBusReceiverAsyncClient.class)); assertThrows(IllegalArgumentException.class, () -> serializer.deserializeList(message, ServiceBusReceiverAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link ServiceBusMessage}. */ @Test void deserializeMessage() { final String payload = "hello-world"; final byte[] payloadBytes = payload.getBytes(UTF_8); final org.apache.qpid.proton.message.Message message = getMessage(payloadBytes); message.setAddress("a-to-address"); message.setContentType("some-content-type"); message.setCorrelationId("correlation-id-test"); message.setDeliveryCount(10); message.setTtl(1045); message.setMessageId("a-test-message-id"); message.setSubject("this is a label"); message.getProperties().setTo("this is a to property"); message.setReplyTo("reply-to-property"); message.setReplyToGroupId("reply-to-session-id-property"); message.setGroupId("session-id-as-a-group-id"); Map<Symbol, Object> expectedMessageAnnotations = message.getMessageAnnotations().getValue(); expectedMessageAnnotations.put(Symbol.valueOf("A"), "A value"); Map<Symbol, Object> expectedDeliveryAnnotations = new HashMap<>(); expectedDeliveryAnnotations.put(Symbol.valueOf("D"), "D value"); message.setDeliveryAnnotations(new DeliveryAnnotations(expectedDeliveryAnnotations)); Map<Symbol, Object> expectedFooterValues = new HashMap<>(); expectedFooterValues.put(Symbol.valueOf("footer1"), "footer value"); message.setFooter(new Footer(expectedFooterValues)); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(message, ServiceBusReceivedMessage.class); assertNotNull(actualMessage.getEnqueuedTime()); assertEquals(SEQUENCE_NUMBER, actualMessage.getSequenceNumber()); assertEquals(message.getTtl(), actualMessage.getTimeToLive().toMillis()); assertEquals(message.getSubject(), actualMessage.getSubject()); assertEquals(message.getReplyTo(), actualMessage.getReplyTo()); assertEquals(message.getDeliveryCount(), actualMessage.getDeliveryCount()); assertEquals(message.getProperties().getTo(), actualMessage.getTo()); assertEquals(message.getReplyToGroupId(), actualMessage.getReplyToSessionId()); assertEquals(message.getGroupId(), actualMessage.getSessionId()); assertEquals(message.getContentType(), actualMessage.getContentType()); assertEquals(message.getCorrelationId(), actualMessage.getCorrelationId()); assertValues(expectedMessageAnnotations, actualMessage.getRawAmqpMessage().getMessageAnnotations()); 
assertValues(expectedDeliveryAnnotations, actualMessage.getRawAmqpMessage().getDeliveryAnnotations()); assertValues(expectedFooterValues, actualMessage.getRawAmqpMessage().getFooter()); assertEquals(APPLICATION_PROPERTIES.size(), actualMessage.getApplicationProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); assertEquals(value, actualMessage.getApplicationProperties().get(key)); }); assertEquals(payload, actualMessage.getBody().toString()); } /** * Verifies that we can serialize OffsetDateTime, Duration and URI in application properties. */ @Test /** * Message with specific type send from .net SDK. * * ServiceBusMessage message = new ServiceBusMessage("Hello world!"); * DateTime utcTime1 = DateTime.Parse("2022-02-24T08:23:23.443127200Z"); * utcTime1 = DateTime.SpecifyKind(utcTime1, DateTimeKind.Utc); * message.ApplicationProperties.Add("time", utcTime2); * message.ApplicationProperties.Add("span", TimeSpan.FromSeconds(10)); * message.ApplicationProperties.Add("uri", new Uri("https: */ @Test void deserializeRealMessageFromByte() { byte[] data = new byte[] { 0, 83, 112, -64, 10, 5, 64, 64, 112, 72, 25, 8, 0, 64, 67, 0, 83, 113, -63, 36, 2, -93, 16, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 45, 116, 111, 107, 101, 110, -104, -99, -119, 88, -41, -124, -37, 69, 10, -98, -95, -99, 119, -64, -61, 36, 90, 0, 83, 114, -63, 85, 6, -93, 19, 120, 45, 111, 112, 116, 45, 101, 110, 113, 117, 101, 117, 101, 100, 45, 116, 105, 109, 101, -125, 0, 0, 1, 127, 42, -30, 45, 43, -93, 21, 120, 45, 111, 112, 116, 45, 115, 101, 113, 117, 101, 110, 99, 101, 45, 110, 117, 109, 98, 101, 114, 85, 78, -93, 18, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 101, 100, 45, 117, 110, 116, 105, 108, -125, 0, 0, 1, 127, 42, -30, -94, 106, 0, 83, 115, -64, 63, 13, -95, 32, 53, 98, 100, 50, 56, 100, 98, 97, 48, 56, 54, 99, 52, 98, 57, 99, 98, 55, 55, 49, 99, 100, 97, 97, 101, 102, 52, 51, 102, 102, 49, 98, 64, 64, 64, 64, 64, 64, 64, -125, 0, 0, 1, 127, 114, -5, 53, 43, -125, 0, 0, 1, 127, 42, -30, 45, 43, 64, 64, 64, 0, 83, 116, -63, -118, 6, -95, 4, 116, 105, 109, 101, 0, -93, 29, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 100, 97, 116, 101, 116, 105, 109, 101, 45, 111, 102, 102, 115, 101, 116, -127, 8, -39, -9, -79, -6, -116, -83, 40, -95, 4, 115, 112, 97, 110, 0, -93, 22, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 116, 105, 109, 101, 115, 112, 97, 110, -127, 0, 0, 0, 0, 5, -11, -31, 0, -95, 3, 117, 114, 105, 0, -93, 17, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 117, 114, 105, -95, 23, 104, 116, 116, 112, 115, 58, 47, 47, 119, 119, 119, 46, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 0, 83, 117, -96, 12, 72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33 }; final Message amqpMessage = Proton.message(); amqpMessage.decode(data, 0, data.length); amqpMessage.setHeader(new Header()); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(amqpMessage, ServiceBusReceivedMessage.class); AtomicInteger convertCount = new AtomicInteger(); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("span", Duration.ofSeconds(2)); specificMap.put("time", OffsetDateTime.parse("2022-02-24T08:23:23.443127200Z")); assertEquals(specificMap.size(), actualMessage.getApplicationProperties().size()); specificMap.forEach((key, value) -> { 
Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); if (value instanceof URI) { assertEquals((URI) value, actualMessage.getApplicationProperties().get(key)); convertCount.getAndIncrement(); } else if (value instanceof Duration) { assertEquals((Duration) value, specificMap.get("span")); convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { assertEquals((OffsetDateTime) value, specificMap.get("time")); convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); } /** * Verifies that an empty collection is returned if the status code was not {@link AmqpResponseCode */ @Test void deserializeListMessagesNotOK() { final Map<String, Object> properties = new HashMap<>(); properties.put("status-code", AmqpResponseCode.FORBIDDEN.getValue()); final Message message = Proton.message(); message.setBody(new AmqpValue("test")); message.setApplicationProperties(new ApplicationProperties(properties)); final List<ServiceBusReceivedMessage> actual = serializer.deserializeList(message, ServiceBusReceivedMessage.class); Assertions.assertNotNull(actual); Assertions.assertTrue(actual.isEmpty()); } private void assertValues(Map<Symbol, Object> expected, Map<String, Object> actual) { assertEquals(expected.size(), actual.size()); for (Map.Entry<Symbol, Object> expectedEntry : expected.entrySet()) { assertEquals(expectedEntry.getValue(), actual.get(expectedEntry.getKey().toString())); } } }
class ServiceBusMessageSerializerTest { private final ServiceBusMessageSerializer serializer = new ServiceBusMessageSerializer(); @Test void deserializeMessageNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(null, ServiceBusMessage.class)); } @Test void deserializeClassNotNull() { assertThrows(NullPointerException.class, () -> serializer.deserialize(Proton.message(), null)); } @Test void serializeObjectNotNull() { assertThrows(NullPointerException.class, () -> serializer.serialize(null)); } /** * Verify that we cannot serialize something that is not of type Message. */ @Test void cannotSerializeObject() { String something = "oops"; assertThrows(IllegalArgumentException.class, () -> serializer.serialize(something)); } /** * Verify we can only deserialize supported classes. */ @Test void cannotDeserializeObject() { final org.apache.qpid.proton.message.Message message = getMessage("hello-world".getBytes(UTF_8)); assertThrows(IllegalArgumentException.class, () -> serializer.deserialize(message, ServiceBusReceiverAsyncClient.class)); assertThrows(IllegalArgumentException.class, () -> serializer.deserializeList(message, ServiceBusReceiverAsyncClient.class)); } /** * Verify that we can deserialize a proton-j message with all the correct contents to {@link ServiceBusMessage}. */ @Test void deserializeMessage() { final String payload = "hello-world"; final byte[] payloadBytes = payload.getBytes(UTF_8); final org.apache.qpid.proton.message.Message message = getMessage(payloadBytes); message.setAddress("a-to-address"); message.setContentType("some-content-type"); message.setCorrelationId("correlation-id-test"); message.setDeliveryCount(10); message.setTtl(1045); message.setMessageId("a-test-message-id"); message.setSubject("this is a label"); message.getProperties().setTo("this is a to property"); message.setReplyTo("reply-to-property"); message.setReplyToGroupId("reply-to-session-id-property"); message.setGroupId("session-id-as-a-group-id"); Map<Symbol, Object> expectedMessageAnnotations = message.getMessageAnnotations().getValue(); expectedMessageAnnotations.put(Symbol.valueOf("A"), "A value"); Map<Symbol, Object> expectedDeliveryAnnotations = new HashMap<>(); expectedDeliveryAnnotations.put(Symbol.valueOf("D"), "D value"); message.setDeliveryAnnotations(new DeliveryAnnotations(expectedDeliveryAnnotations)); Map<Symbol, Object> expectedFooterValues = new HashMap<>(); expectedFooterValues.put(Symbol.valueOf("footer1"), "footer value"); message.setFooter(new Footer(expectedFooterValues)); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(message, ServiceBusReceivedMessage.class); assertNotNull(actualMessage.getEnqueuedTime()); assertEquals(SEQUENCE_NUMBER, actualMessage.getSequenceNumber()); assertEquals(message.getTtl(), actualMessage.getTimeToLive().toMillis()); assertEquals(message.getSubject(), actualMessage.getSubject()); assertEquals(message.getReplyTo(), actualMessage.getReplyTo()); assertEquals(message.getDeliveryCount(), actualMessage.getDeliveryCount()); assertEquals(message.getProperties().getTo(), actualMessage.getTo()); assertEquals(message.getReplyToGroupId(), actualMessage.getReplyToSessionId()); assertEquals(message.getGroupId(), actualMessage.getSessionId()); assertEquals(message.getContentType(), actualMessage.getContentType()); assertEquals(message.getCorrelationId(), actualMessage.getCorrelationId()); assertValues(expectedMessageAnnotations, actualMessage.getRawAmqpMessage().getMessageAnnotations()); 
assertValues(expectedDeliveryAnnotations, actualMessage.getRawAmqpMessage().getDeliveryAnnotations()); assertValues(expectedFooterValues, actualMessage.getRawAmqpMessage().getFooter()); assertEquals(APPLICATION_PROPERTIES.size(), actualMessage.getApplicationProperties().size()); APPLICATION_PROPERTIES.forEach((key, value) -> { Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); assertEquals(value, actualMessage.getApplicationProperties().get(key)); }); assertEquals(payload, actualMessage.getBody().toString()); } /** * Verifies that we can serialize OffsetDateTime, Duration and URI in application properties. */ @Test /** * Message with specific type send from .net SDK. * * ServiceBusMessage message = new ServiceBusMessage("Hello world!"); * DateTime utcTime1 = DateTime.Parse("2022-02-24T08:23:23.443127200Z"); * utcTime1 = DateTime.SpecifyKind(utcTime1, DateTimeKind.Utc); * message.ApplicationProperties.Add("time", utcTime2); * message.ApplicationProperties.Add("span", TimeSpan.FromSeconds(10)); * message.ApplicationProperties.Add("uri", new Uri("https: */ @Test void deserializeRealMessageFromByte() { byte[] data = new byte[] { 0, 83, 112, -64, 10, 5, 64, 64, 112, 72, 25, 8, 0, 64, 67, 0, 83, 113, -63, 36, 2, -93, 16, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 45, 116, 111, 107, 101, 110, -104, -99, -119, 88, -41, -124, -37, 69, 10, -98, -95, -99, 119, -64, -61, 36, 90, 0, 83, 114, -63, 85, 6, -93, 19, 120, 45, 111, 112, 116, 45, 101, 110, 113, 117, 101, 117, 101, 100, 45, 116, 105, 109, 101, -125, 0, 0, 1, 127, 42, -30, 45, 43, -93, 21, 120, 45, 111, 112, 116, 45, 115, 101, 113, 117, 101, 110, 99, 101, 45, 110, 117, 109, 98, 101, 114, 85, 78, -93, 18, 120, 45, 111, 112, 116, 45, 108, 111, 99, 107, 101, 100, 45, 117, 110, 116, 105, 108, -125, 0, 0, 1, 127, 42, -30, -94, 106, 0, 83, 115, -64, 63, 13, -95, 32, 53, 98, 100, 50, 56, 100, 98, 97, 48, 56, 54, 99, 52, 98, 57, 99, 98, 55, 55, 49, 99, 100, 97, 97, 101, 102, 52, 51, 102, 102, 49, 98, 64, 64, 64, 64, 64, 64, 64, -125, 0, 0, 1, 127, 114, -5, 53, 43, -125, 0, 0, 1, 127, 42, -30, 45, 43, 64, 64, 64, 0, 83, 116, -63, -118, 6, -95, 4, 116, 105, 109, 101, 0, -93, 29, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 100, 97, 116, 101, 116, 105, 109, 101, 45, 111, 102, 102, 115, 101, 116, -127, 8, -39, -9, -79, -6, -116, -83, 40, -95, 4, 115, 112, 97, 110, 0, -93, 22, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 116, 105, 109, 101, 115, 112, 97, 110, -127, 0, 0, 0, 0, 5, -11, -31, 0, -95, 3, 117, 114, 105, 0, -93, 17, 99, 111, 109, 46, 109, 105, 99, 114, 111, 115, 111, 102, 116, 58, 117, 114, 105, -95, 23, 104, 116, 116, 112, 115, 58, 47, 47, 119, 119, 119, 46, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 0, 83, 117, -96, 12, 72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33 }; final Message amqpMessage = Proton.message(); amqpMessage.decode(data, 0, data.length); amqpMessage.setHeader(new Header()); final ServiceBusReceivedMessage actualMessage = serializer.deserialize(amqpMessage, ServiceBusReceivedMessage.class); AtomicInteger convertCount = new AtomicInteger(); HashMap<String, Object> specificMap = new HashMap<>(); specificMap.put("uri", URI.create("https: specificMap.put("span", Duration.ofSeconds(2)); specificMap.put("time", OffsetDateTime.parse("2022-02-24T08:23:23.443127200Z")); assertEquals(specificMap.size(), actualMessage.getApplicationProperties().size()); specificMap.forEach((key, value) -> { 
Assertions.assertTrue(actualMessage.getApplicationProperties().containsKey(key)); if (value instanceof URI) { assertEquals((URI) value, actualMessage.getApplicationProperties().get(key)); convertCount.getAndIncrement(); } else if (value instanceof Duration) { assertEquals((Duration) value, specificMap.get("span")); convertCount.getAndIncrement(); } else if (value instanceof OffsetDateTime) { assertEquals((OffsetDateTime) value, specificMap.get("time")); convertCount.getAndIncrement(); } }); assertEquals(specificMap.size(), convertCount.get()); } /** * Verifies that an empty collection is returned if the status code was not {@link AmqpResponseCode */ @Test void deserializeListMessagesNotOK() { final Map<String, Object> properties = new HashMap<>(); properties.put("status-code", AmqpResponseCode.FORBIDDEN.getValue()); final Message message = Proton.message(); message.setBody(new AmqpValue("test")); message.setApplicationProperties(new ApplicationProperties(properties)); final List<ServiceBusReceivedMessage> actual = serializer.deserializeList(message, ServiceBusReceivedMessage.class); Assertions.assertNotNull(actual); Assertions.assertTrue(actual.isEmpty()); } private void assertValues(Map<Symbol, Object> expected, Map<String, Object> actual) { assertEquals(expected.size(), actual.size()); for (Map.Entry<Symbol, Object> expectedEntry : expected.entrySet()) { assertEquals(expectedEntry.getValue(), actual.get(expectedEntry.getKey().toString())); } } }
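A hypothetical sketch of the kind of test the reviewer's comment above asks for; the class name and assertions are illustrative, not part of the actual serializer test suite. It only demonstrates that char count and UTF-8 byte count diverge once the payload contains multi-byte characters, and that a round trip through the UTF-8 bytes is lossless:

```java
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

class Utf8ContentsTest {
    @Test
    void multiByteCharactersSurviveRoundTrip() {
        // "é" is 2 bytes, "你" and "好" are 3 bytes each, and "😀" is 4 bytes
        // in UTF-8, so the encoded byte count exceeds the char count.
        String contents = "caf\u00e9 \u4f60\u597d \ud83d\ude00";

        byte[] encoded = contents.getBytes(UTF_8);
        assertTrue(encoded.length > contents.length());

        // Decoding the UTF-8 bytes must restore the original string.
        assertEquals(contents, new String(encoded, UTF_8));
    }
}
```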
This part seems not correct: `((String) this.getDescribed()).length()` returns the char (UTF-16 code unit) count, not the encoded byte length. I am not sure which version is required. Document it in the superclass.
public int size() { return URI_SYMBOL.length() + ((String) this.getDescribed()).length(); }
return URI_SYMBOL.length() + ((String) this.getDescribed()).length();
public int size() { return URI_SYMBOL.length() + ((String) this.getDescribed()).getBytes(StandardCharsets.UTF_8).length; }
class UriDescribedType extends ServiceBusDescribedType { /** * Set described to describe data in described type. * * @param described real value in the described type. */ public UriDescribedType(Object described) { super(URI_SYMBOL, ((URI) described).toString()); } @Override }
class UriDescribedType extends ServiceBusDescribedType { /** * Set described to describe data in described type. * * @param uri set as described in DescribedType. */ public UriDescribedType(URI uri) { super(URI_SYMBOL, uri.toString()); } @Override }
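The fix above swaps `String.length()` (a UTF-16 code-unit count) for `getBytes(StandardCharsets.UTF_8).length` (the encoded byte count), which is what a wire-size calculation needs. A standalone sketch of why the two differ for non-ASCII input (the sample URI is made up):

```java
import static java.nio.charset.StandardCharsets.UTF_8;

public class LengthVsByteLength {
    public static void main(String[] args) {
        String described = "https://例え.jp/"; // hypothetical URI with non-ASCII chars

        // length() counts UTF-16 code units: 14 for this string.
        System.out.println(described.length());

        // getBytes(UTF_8).length counts encoded bytes: 18 here, because
        // each of the two Japanese characters encodes to 3 bytes.
        System.out.println(described.getBytes(UTF_8).length);
    }
}
```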
Is it possible that `value` is not a parsable double? If so, `Double.parseDouble(value)` will throw a `NumberFormatException` here.
public boolean evaluate(FeatureFilterEvaluationContext context) { String value = String.valueOf(context.getParameters().get(PERCENTAGE_FILTER_SETTING)); boolean result = true; if (value.equals("null") || Double.parseDouble(value) < 0) { LOGGER.warn("The {} feature filter does not have a valid {} value for feature {}.", this.getClass().getSimpleName(), PERCENTAGE_FILTER_SETTING, context.getName()); result = false; } else { result = (Math.random() * 100) <= Double.parseDouble(value); } return result; }
if (value.equals("null") || Double.parseDouble(value) < 0) {
public boolean evaluate(FeatureFilterEvaluationContext context) { String value = String.valueOf(context.getParameters().get(PERCENTAGE_FILTER_SETTING)); boolean result = true; if (value.equals("null") || Double.parseDouble(value) < 0) { LOGGER.warn("The {} feature filter does not have a valid {} value for feature {}.", this.getClass().getSimpleName(), PERCENTAGE_FILTER_SETTING, context.getName()); result = false; } else { result = (Math.random() * 100) <= Double.parseDouble(value); } return result; }
class PercentageFilter implements FeatureFilter { private static final Logger LOGGER = LoggerFactory.getLogger(PercentageFilter.class); /** * Performs a percentage based evaluation to determine whether a feature is enabled. * * @param context The feature evaluation context. * @return True if the feature is enabled, false otherwise. */ @Override }
class PercentageFilter implements FeatureFilter { private static final Logger LOGGER = LoggerFactory.getLogger(PercentageFilter.class); /** * Performs a percentage based evaluation to determine whether a feature is enabled. * * @param context The feature evaluation context. * @return True if the feature is enabled, false otherwise. * @throws NumberFormatException if the percentage filter setting is not a parsable double */ @Override }
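The recorded resolution documents the `NumberFormatException` in the Javadoc rather than catching it. As an alternative sketch only (a hypothetical helper, not what the SDK does), the parse could be guarded with a fallback:

```java
public final class SafeDoubleParse {
    private SafeDoubleParse() {
    }

    /** Parses {@code value} as a double, returning {@code fallback} when it is null-ish or unparsable. */
    public static double parseOrDefault(String value, double fallback) {
        if (value == null || "null".equals(value)) {
            return fallback;
        }
        try {
            return Double.parseDouble(value);
        } catch (NumberFormatException e) {
            // e.g. value = "fifty" would otherwise propagate out of evaluate()
            return fallback;
        }
    }

    public static void main(String[] args) {
        System.out.println(parseOrDefault("37.5", -1));  // 37.5
        System.out.println(parseOrDefault("fifty", -1)); // -1.0
    }
}
```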
nit: please remove the commented-out line
public void invalidClientEncryptionKeyException() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); CosmosClientEncryptionKeyProperties keyProperties = generateClientEncryptionKeyProperties(); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(keyProperties)); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); EncryptionSettings mockEncryptionSettings = Mockito.mock(EncryptionSettings.class); ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings); Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class), Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())). thenThrow(new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties, encryptionKeyWrapProviderAccessor.getEncryptionKeyStoreProviderImpl(keyStoreProvider), keyProperties.getId())); Mockito.doNothing().when(mockEncryptionSettings).setEncryptionSettingForProperty(Mockito.anyString(), Mockito.any(EncryptionSettings.class), Mockito.any(Instant.class)); Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); encryptionSettings = encryptionProcessor.getEncryptionSettings(); mockEncryptionSettings = Mockito.mock(EncryptionSettings.class); ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings); Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class), Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())). thenThrow(new InvalidKeyException(), new InvalidKeyException(), new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties, encryptionKeyWrapProviderAccessor.getEncryptionKeyStoreProviderImpl(keyStoreProvider), keyProperties.getId())); try { encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); fail("Expecting initializeEncryptionSettingsAsync to throw InvalidKeyException"); } catch (Exception ex) { InvalidKeyException invalidKeyException = Utils.as(ex.getCause(), InvalidKeyException.class); assertThat(invalidKeyException).isNotNull(); } }
public void invalidClientEncryptionKeyException() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); CosmosClientEncryptionKeyProperties keyProperties = generateClientEncryptionKeyProperties(); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(keyProperties)); EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER"); Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); EncryptionSettings mockEncryptionSettings = Mockito.mock(EncryptionSettings.class); ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings); Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class), Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())). thenThrow(new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties, cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient), keyProperties.getId())); Mockito.doNothing().when(mockEncryptionSettings).setEncryptionSettingForProperty(Mockito.anyString(), Mockito.any(EncryptionSettings.class), Mockito.any(Instant.class)); Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); encryptionSettings = encryptionProcessor.getEncryptionSettings(); mockEncryptionSettings = Mockito.mock(EncryptionSettings.class); ReflectionUtils.setEncryptionSettings(encryptionProcessor, mockEncryptionSettings); Mockito.when(mockEncryptionSettings.buildProtectedDataEncryptionKey(Mockito.any(CosmosClientEncryptionKeyProperties.class), Mockito.any(EncryptionKeyStoreProvider.class), Mockito.anyString())). 
thenThrow(new InvalidKeyException(), new InvalidKeyException(), new InvalidKeyException()).thenReturn(encryptionSettings.buildProtectedDataEncryptionKey(keyProperties, cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient), keyProperties.getId())); try { encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); fail("Expecting initializeEncryptionSettingsAsync to throw InvalidKeyException"); } catch (Exception ex) { InvalidKeyException invalidKeyException = Utils.as(ex.getCause(), InvalidKeyException.class); assertThat(invalidKeyException).isNotNull(); } }
class EncryptionProcessorAndSettingsTest { private static final int TIMEOUT = 6000_000; private static final ObjectMapper MAPPER = new ObjectMapper(); private static final EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider keyStoreProvider = new EncryptionAsyncApiCrudTest.TestEncryptionKeyStoreProvider(); private final static EncryptionImplementationBridgeHelpers.EncryptionKeyWrapProviderHelper.EncryptionKeyWrapProviderAccessor encryptionKeyWrapProviderAccessor = EncryptionImplementationBridgeHelpers.EncryptionKeyWrapProviderHelper.getEncryptionKeyWrapProviderAccessor(); private final static ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.CosmosContainerPropertiesAccessor cosmosContainerPropertiesAccessor = ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.getCosmosContainerPropertiesAccessor(); private final static EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.CosmosEncryptionAsyncClientAccessor cosmosEncryptionAsyncClientAccessor = EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.getCosmosEncryptionAsyncClientAccessor(); @Test(groups = {"unit"}, timeOut = TIMEOUT) public void initializeEncryptionSettingsAsync() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); try { encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); fail("encryptionSettings should be empty"); } catch (NullPointerException ex) { } Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); encryptionSettings = encryptionProcessor.getEncryptionSettings(); CachedEncryptionSettings cachedEncryptionSettings = encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor)).isTrue(); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getEncryptionSettings().getClientEncryptionKeyId()).isEqualTo("key1"); } @Test(groups = {"unit"}, timeOut = TIMEOUT) public void withoutInitializeEncryptionSettingsAsync() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); 
Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); encryptionSettings.setDatabaseRid("TestDb"); try { encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); fail("encryptionSettings should be empty"); } catch (NullPointerException ex) { } EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings); EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1"); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); } @Test(groups = {"unit"}, timeOut = TIMEOUT) public void encryptionSettingCachedTimeToLive() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getEncryptionKeyWrapProvider()).thenReturn(keyStoreProvider); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); encryptionSettings.setDatabaseRid("TestDb"); EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings); EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, 
Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1"); assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isAfter(Instant.now().plus(Duration.ofMinutes(59))); assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isBefore(Instant.now().plus(Duration.ofMinutes(61))); spyEncryptionSettings.setEncryptionSettingForProperty("sensitiveString", cachedEncryptionSettings, Instant.now()); spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(2)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); } @Test(groups = {"unit"}, timeOut = TIMEOUT) private ClientEncryptionPolicy generateClientEncryptionPolicy() { ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath(); includedPath1.setClientEncryptionKeyId("key1"); includedPath1.setPath("/sensitiveString"); includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName()); includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath1); return new ClientEncryptionPolicy(paths); } private CosmosContainerProperties generateContainerWithCosmosEncryptionPolicy() { CosmosContainerProperties containerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), "/mypk"); cosmosContainerPropertiesAccessor.setSelfLink(containerProperties, "dbs/testDb/colls/testCol"); ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath(); includedPath1.setClientEncryptionKeyId("key1"); includedPath1.setPath("/sensitiveString"); includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName()); includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath1); return containerProperties.setClientEncryptionPolicy(new ClientEncryptionPolicy(paths)); } private CosmosClientEncryptionKeyProperties generateClientEncryptionKeyProperties() throws JsonProcessingException { TextNode treeNode = new TextNode("S84PieiyZNyHxeuUuX5IXSV2KOktpt02tQM4QLhm8dI="); byte[] key = MAPPER.treeToValue(treeNode, byte[].class); EncryptionKeyWrapMetadata metadata = new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1"); return new CosmosClientEncryptionKeyProperties("key1", CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName(), key, metadata); } }
class EncryptionProcessorAndSettingsTest { private static final int TIMEOUT = 6000_000; private static final ObjectMapper MAPPER = new ObjectMapper(); private static final EncryptionAsyncApiCrudTest.TestKeyEncryptionKeyResolver keyEncryptionKeyResolver = new EncryptionAsyncApiCrudTest.TestKeyEncryptionKeyResolver(); private final static ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.CosmosContainerPropertiesAccessor cosmosContainerPropertiesAccessor = ImplementationBridgeHelpers.CosmosContainerPropertiesHelper.getCosmosContainerPropertiesAccessor(); private final static EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.CosmosEncryptionAsyncClientAccessor cosmosEncryptionAsyncClientAccessor = EncryptionImplementationBridgeHelpers.CosmosEncryptionAsyncClientHelper.getCosmosEncryptionAsyncClientAccessor(); @Test(groups = {"unit"}, timeOut = TIMEOUT) public void initializeEncryptionSettingsAsync() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER"); Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); try { encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); fail("encryptionSettings should be empty"); } catch (NullPointerException ex) { } Assertions.assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); encryptionProcessor.initializeEncryptionSettingsAsync(false).block(); encryptionSettings = encryptionProcessor.getEncryptionSettings(); CachedEncryptionSettings cachedEncryptionSettings = encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor)).isTrue(); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getEncryptionSettings().getClientEncryptionKeyId()).isEqualTo("key1"); } @Test(groups = {"unit"}, timeOut = TIMEOUT) public void withoutInitializeEncryptionSettingsAsync() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); 
Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER"); Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); encryptionSettings.setDatabaseRid("TestDb"); try { encryptionSettings.getEncryptionSettingCacheByPropertyName().getAsync("sensitiveString", null, null).block(); fail("encryptionSettings should be empty"); } catch (NullPointerException ex) { } EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings); EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); assertThat(ReflectionUtils.isEncryptionSettingsInitDone(encryptionProcessor).get()).isFalse(); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1"); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); } @Test(groups = {"unit"}, timeOut = TIMEOUT) public void encryptionSettingCachedTimeToLive() throws Exception { CosmosAsyncContainer cosmosAsyncContainer = Mockito.mock(CosmosAsyncContainer.class); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = Mockito.mock(CosmosEncryptionAsyncClient.class); Mockito.when(cosmosEncryptionAsyncClient.getKeyEncryptionKeyResolver()).thenReturn(keyEncryptionKeyResolver); Mockito.when(cosmosEncryptionAsyncClientAccessor.getContainerPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateContainerWithCosmosEncryptionPolicy())); Mockito.when(cosmosEncryptionAsyncClientAccessor.getClientEncryptionPropertiesAsync(cosmosEncryptionAsyncClient, Mockito.anyString(), Mockito.anyString(), Mockito.any(CosmosAsyncContainer.class), Mockito.anyBoolean(), Mockito.nullable(String.class), Mockito.anyBoolean())).thenReturn(Mono.just(generateClientEncryptionKeyProperties())); EncryptionKeyStoreProviderImpl encryptionKeyStoreProvider = new EncryptionKeyStoreProviderImpl(keyEncryptionKeyResolver, "TEST_KEY_RESOLVER"); 
Mockito.when(cosmosEncryptionAsyncClientAccessor.getEncryptionKeyStoreProviderImpl(cosmosEncryptionAsyncClient)).thenReturn(encryptionKeyStoreProvider); EncryptionProcessor encryptionProcessor = new EncryptionProcessor(cosmosAsyncContainer, cosmosEncryptionAsyncClient); EncryptionSettings encryptionSettings = encryptionProcessor.getEncryptionSettings(); encryptionSettings.setDatabaseRid("TestDb"); EncryptionSettings spyEncryptionSettings = Mockito.spy(encryptionSettings); EncryptionSettings cachedEncryptionSettings = spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(1)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); assertThat(cachedEncryptionSettings).isNotNull(); assertThat(cachedEncryptionSettings.getClientEncryptionKeyId()).isEqualTo("key1"); assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isAfter(Instant.now().plus(Duration.ofMinutes(59))); assertThat(cachedEncryptionSettings.getEncryptionSettingTimeToLive()).isBefore(Instant.now().plus(Duration.ofMinutes(61))); spyEncryptionSettings.setEncryptionSettingForProperty("sensitiveString", cachedEncryptionSettings, Instant.now().minus(Duration.ofSeconds(5))); spyEncryptionSettings.getEncryptionSettingForPropertyAsync("sensitiveString", encryptionProcessor).block(); Mockito.verify(spyEncryptionSettings, Mockito.times(2)).fetchCachedEncryptionSettingsAsync(Mockito.anyString(), Mockito.any(EncryptionProcessor.class)); } @Test(groups = {"unit"}, timeOut = TIMEOUT) private ClientEncryptionPolicy generateClientEncryptionPolicy() { ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath(); includedPath1.setClientEncryptionKeyId("key1"); includedPath1.setPath("/sensitiveString"); includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName()); includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath1); return new ClientEncryptionPolicy(paths); } private CosmosContainerProperties generateContainerWithCosmosEncryptionPolicy() { CosmosContainerProperties containerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), "/mypk"); cosmosContainerPropertiesAccessor.setSelfLink(containerProperties, "dbs/testDb/colls/testCol"); ClientEncryptionIncludedPath includedPath1 = new ClientEncryptionIncludedPath(); includedPath1.setClientEncryptionKeyId("key1"); includedPath1.setPath("/sensitiveString"); includedPath1.setEncryptionType(CosmosEncryptionType.DETERMINISTIC.getName()); includedPath1.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath1); return containerProperties.setClientEncryptionPolicy(new ClientEncryptionPolicy(paths)); } private CosmosClientEncryptionKeyProperties generateClientEncryptionKeyProperties() throws JsonProcessingException { TextNode treeNode = new TextNode("S84PieiyZNyHxeuUuX5IXSV2KOktpt02tQM4QLhm8dI="); byte[] key = MAPPER.treeToValue(treeNode, byte[].class); EncryptionKeyWrapMetadata metadata = new EncryptionKeyWrapMetadata("TEST_KEY_RESOLVER", "key1", "tempmetadata1", "RSA-OAEP"); return new CosmosClientEncryptionKeyProperties("key1", CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName(), key, metadata); } }
You are returning a primitive (little `l`) `long` here - but your Javadoc comment says it returns null if this is a virtual directory?
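Worth making the contradiction explicit: `BasicFileAttributes.size()` is declared to return a primitive `long`, which can never carry null, so either the Javadoc must stop promising null or the virtual-directory case needs a documented sentinel value. A minimal sketch of the sentinel route, reusing the `isVirtualDirectory` flag and `properties.getBlobSize()` call that appear in the surrounding records; the choice of 0 as the sentinel is an assumption here:

```java
/**
 * Returns the size of the file (in bytes), or 0 if this is a virtual
 * directory. A primitive long cannot be null, so 0 serves as the
 * documented sentinel instead. (Sketch; 0 is an assumed convention.)
 */
@Override
public long size() {
    return this.isVirtualDirectory ? 0 : this.properties.getBlobSize();
}
```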
public long size() { return this.internalAttributes.size(); }
}
public long size() { return this.internalAttributes.size(); }
class AzureBasicFileAttributes implements BasicFileAttributes { private final ClientLogger logger = new ClientLogger(AzureBasicFileAttributes.class); static final Set<String> ATTRIBUTE_STRINGS; static { Set<String> set = new HashSet<>(); set.add("lastModifiedTime"); set.add("isRegularFile"); set.add("isDirectory"); set.add("isVirtualDirectory"); set.add("isSymbolicLink"); set.add("isOther"); set.add("size"); set.add("creationTime"); ATTRIBUTE_STRINGS = Collections.unmodifiableSet(set); } private final AzureBlobFileAttributes internalAttributes; /* In order to support Files.exist() and other methods like Files.walkFileTree() which depend on it, we have had to add support for virtual directories. This is not ideal as customers will have to now perform null checks when inspecting attributes (or at least check if it is a virtual directory before inspecting properties). It also incurs extra network requests as we have to call a checkDirectoryExists() after receiving the initial 404. This is two additional network requests, though they only happen in the case when a file doesn't exist or is virtual, so it shouldn't happen in the majority of api calls. */ AzureBasicFileAttributes(Path path) throws IOException { this.internalAttributes = new AzureBlobFileAttributes(path); } /** * Returns the time of last modification or null if this is a virtual directory. * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return this.internalAttributes.lastModifiedTime(); } /** * Returns the time of last modification or null if this is a virtual directory * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return this.internalAttributes.lastAccessTime(); } /** * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return this.internalAttributes.creationTime(); } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return this.internalAttributes.isRegularFile(); } /** * Tells whether the file is a directory. * <p> * Will only return true if the directory is a concrete directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return this.internalAttributes.isDirectory(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.internalAttributes.isVirtualDirectory(); } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return this.internalAttributes.isSymbolicLink(); } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. 
*/ @Override public boolean isOther() { return this.internalAttributes.isOther(); } /** * Returns the size of the file (in bytes) or null if this is a virtual directory. * * @return the size of the file or null if this is a virtual directory */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return this.internalAttributes.fileKey(); } }
class AzureBasicFileAttributes implements BasicFileAttributes { private final ClientLogger logger = new ClientLogger(AzureBasicFileAttributes.class); static final Set<String> ATTRIBUTE_STRINGS; static { Set<String> set = new HashSet<>(); set.add("lastModifiedTime"); set.add("isRegularFile"); set.add("isDirectory"); set.add("isVirtualDirectory"); set.add("isSymbolicLink"); set.add("isOther"); set.add("size"); set.add("creationTime"); ATTRIBUTE_STRINGS = Collections.unmodifiableSet(set); } private final AzureBlobFileAttributes internalAttributes; /* In order to support Files.exist() and other methods like Files.walkFileTree() which depend on it, we have had to add support for virtual directories. This is not ideal as customers will have to now perform null checks when inspecting attributes (or at least check if it is a virtual directory before inspecting properties). It also incurs extra network requests as we have to call a checkDirectoryExists() after receiving the initial 404. This is two additional network requests, though they only happen in the case when a file doesn't exist or is virtual, so it shouldn't happen in the majority of api calls. */ AzureBasicFileAttributes(Path path) throws IOException { this.internalAttributes = new AzureBlobFileAttributes(path); } /** * Returns the time of last modification or null if this is a virtual directory. * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return this.internalAttributes.lastModifiedTime(); } /** * Returns the time of last modification or null if this is a virtual directory * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return this.internalAttributes.lastAccessTime(); } /** * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return this.internalAttributes.creationTime(); } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return this.internalAttributes.isRegularFile(); } /** * Tells whether the file is a directory. * <p> * Will only return true if the directory is a concrete directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return this.internalAttributes.isDirectory(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.internalAttributes.isVirtualDirectory(); } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return this.internalAttributes.isSymbolicLink(); } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return this.internalAttributes.isOther(); } /** * Returns the size of the file (in bytes). 
* * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return this.internalAttributes.fileKey(); } }
As a minor stylistic comment (and as such, I'm not expecting anything from saying this), one pattern I've favoured over the years is to reverse this code approach, to reduce indentation and 'fall out' of the method early, rather than late. In this case, I would have considered doing it as such: ```java public long size() { if (this.isVirtualDirectory) { return 0; } return properties.getBlobSize(); } ``` In some projects, we even had a code style expectation that such simple return statements didn't need to have braces, so we would have made it simpler and on one line, as below: ```java public long size() { if (this.isVirtualDirectory) return 0; return properties.getBlobSize(); } ``` Of course, at this point we might as well go the whole way and just do a single-line ternary expression: ```java public long size() { return this.isVirtualDirectory ? 0 : properties.getBlobSize(); } ```
public long size() { if (!this.isVirtualDirectory) { return properties.getBlobSize(); } else { return 0; } }
}
public long size() { return !this.isVirtualDirectory ? properties.getBlobSize() : 0; }
class AzureBlobFileAttributes implements BasicFileAttributes { /* Some blob properties do not have getters as they do not make sense in the context of nio. These properties are: - incremental snapshot related properties (only for page blobs) - lease related properties (leases not currently supported) - sequence number (only for page blobs) - encryption key sha256 (cpk not supported) - committed block count (only for append blobs) */ private final ClientLogger logger = new ClientLogger(AzureBlobFileAttributes.class); private final BlobProperties properties; private final AzureResource resource; private final boolean isVirtualDirectory; AzureBlobFileAttributes(Path path) throws IOException { this.resource = new AzureResource(path); BlobProperties props = null; try { props = resource.getBlobClient().getProperties(); } catch (BlobStorageException e) { if (e.getStatusCode() == 404 && this.resource.checkDirectoryExists()) { this.isVirtualDirectory = true; this.properties = null; return; } else { throw LoggingUtility.logError(logger, new IOException("Path: " + path.toString(), e)); } } this.properties = props; this.isVirtualDirectory = false; } static Map<String, Supplier<Object>> getAttributeSuppliers(AzureBlobFileAttributes attributes) { Map<String, Supplier<Object>> map = new HashMap<>(); map.put("creationTime", attributes::creationTime); map.put("lastModifiedTime", attributes::lastModifiedTime); map.put("eTag", attributes::eTag); map.put("blobHttpHeaders", attributes::blobHttpHeaders); map.put("blobType", attributes::blobType); map.put("copyId", attributes::copyId); map.put("copyStatus", attributes::copyStatus); map.put("copySource", attributes::copySource); map.put("copyProgress", attributes::copyProgress); map.put("copyCompletionTime", attributes::copyCompletionTime); map.put("copyStatusDescription", attributes::copyStatusDescription); map.put("isServerEncrypted", attributes::isServerEncrypted); map.put("accessTier", attributes::accessTier); map.put("isAccessTierInferred", attributes::isAccessTierInferred); map.put("archiveStatus", attributes::archiveStatus); map.put("accessTierChangeTime", attributes::accessTierChangeTime); map.put("metadata", attributes::metadata); map.put("isRegularFile", attributes::isRegularFile); map.put("isDirectory", attributes::isDirectory); map.put("isVirtualDirectory", attributes::isVirtualDirectory); map.put("isSymbolicLink", attributes::isSymbolicLink); map.put("isOther", attributes::isOther); map.put("size", attributes::size); return map; } /** * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or */ @Override public FileTime creationTime() { if (!this.isVirtualDirectory) { return FileTime.from(this.properties.getCreationTime().toInstant()); } else { return null; } } /** * Returns the time of last modification. 
Returns null if this is a virtual directory * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { if (!this.isVirtualDirectory) { return FileTime.from(this.properties.getLastModified().toInstant()); } else { return null; } } /** * Returns the eTag of the blob or null if this is a virtual directory * * @return the eTag of the blob or null if this is a virtual directory */ public String eTag() { if (!this.isVirtualDirectory) { return this.properties.getETag(); } else { return null; } } /** * Returns the {@link BlobHttpHeaders} of the blob or null if this is a virtual directory. * * @return {@link BlobHttpHeaders} or null if this is a virtual directory */ public BlobHttpHeaders blobHttpHeaders() { if (!this.isVirtualDirectory) { /* We return these all as one value so it's consistent with the way of setting, especially the setAttribute method that accepts a string argument for the name of the property. Returning them individually would mean we have to support setting them individually as well, which is not possible due to service constraints. */ return new BlobHttpHeaders() .setContentType(this.properties.getContentType()) .setContentLanguage(this.properties.getContentLanguage()) .setContentMd5(this.properties.getContentMd5()) .setContentDisposition(this.properties.getContentDisposition()) .setContentEncoding(this.properties.getContentEncoding()) .setCacheControl(this.properties.getCacheControl()); } else { return null; } } /** * Returns the type of the blob or null if this is a virtual directory * * @return the type of the blob or null if this is a virtual directory */ public BlobType blobType() { if (!this.isVirtualDirectory) { return this.properties.getBlobType(); } else { return null; } } /** * Returns the identifier of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the identifier of the last copy operation or null if this is a virtual directory */ public String copyId() { if (!this.isVirtualDirectory) { return this.properties.getCopyId(); } else { return null; } } /** * Returns the status of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the status of the last copy operation or null if this is a virtual directory */ public CopyStatusType copyStatus() { if (!this.isVirtualDirectory) { return this.properties.getCopyStatus(); } else { return null; } } /** * Returns the source blob URL from the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory * * @return the source blob URL from the last copy operation or null if this is a virtual directory */ public String copySource() { if (!this.isVirtualDirectory) { return this.properties.getCopySource(); } else { return null; } } /** * Returns the number of bytes copied and total bytes in the source from the last copy operation (bytes copied/total * bytes). If this blob hasn't been the target of a copy operation or has been modified since this won't be set. 
* Returns null if this is a virtual directory * * @return the number of bytes copied and total bytes in the source from the last copy operation null if this is a * virtual directory */ public String copyProgress() { if (!this.isVirtualDirectory) { return this.properties.getCopyProgress(); } else { return null; } } /** * Returns the completion time of the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory. * * @return the completion time of the last copy operation or null if this is a virtual directory */ public OffsetDateTime copyCompletionTime() { if (!this.isVirtualDirectory) { return this.properties.getCopyCompletionTime(); } else { return null; } } /** * Returns the description of the last copy failure, this is set when the {@link * {@link CopyStatusType * target of a copy operation or has been modified since this won't be set. Returns null if this is a virtual * directory. * * @return the description of the last copy failure or null if this is a virtual directory */ public String copyStatusDescription() { if (!this.isVirtualDirectory) { return this.properties.getCopyStatusDescription(); } else { return null; } } /** * Returns the status of the blob being encrypted on the server or null if this is a virtual directory. * * @return the status of the blob being encrypted on the server or null if this is a virtual directory */ public Boolean isServerEncrypted() { if (!this.isVirtualDirectory) { return this.properties.isServerEncrypted(); } else { return null; } } /** * Returns the tier of the blob. This is only set for Page blobs on a premium storage account or for Block blobs on * blob storage or general purpose V2 account. Returns null if this is a virtual directory. * * @return the tier of the blob or null if this is a virtual directory */ public AccessTier accessTier() { if (!this.isVirtualDirectory) { return this.properties.getAccessTier(); } else { return null; } } /** * Returns the status of the tier being inferred for the blob. This is only set for Page blobs on a premium storage * account or for Block blobs on blob storage or general purpose V2 account. Returns null if this is a virtual * directory. * * @return the status of the tier being inferred for the blob or null if this is a virtual directory */ public Boolean isAccessTierInferred() { if (!this.isVirtualDirectory) { return this.properties.isAccessTierInferred(); } else { return null; } } /** * Returns the archive status of the blob. This is only for blobs on a blob storage and general purpose v2 account. * Returns null if this is a virtual directory. * * @return the archive status of the blob or null if this is a virtual directory */ public ArchiveStatus archiveStatus() { if (!this.isVirtualDirectory) { return this.properties.getArchiveStatus(); } else { return null; } } /** * Returns the time when the access tier for the blob was last changed or null if this is a virtual directory. * * @return the time when the access tier for the blob was last changed or null if this is a virtual directory */ public OffsetDateTime accessTierChangeTime() { if (!this.isVirtualDirectory) { return this.properties.getAccessTierChangeTime(); } else { return null; } } /** * Returns the metadata associated with this blob or null if this is a virtual directory. 
* * @return the metadata associated with this blob or null if this is a virtual directory */ public Map<String, String> metadata() { if (!this.isVirtualDirectory) { return Collections.unmodifiableMap(this.properties.getMetadata()); } else { return null; } } /** * Returns the time of last modification or null if this is a virtual directory. * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification null if this is a virtual directory */ @Override public FileTime lastAccessTime() { if (!this.isVirtualDirectory) { return this.lastModifiedTime(); } else { return null; } } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { if (!this.isVirtualDirectory) { return !this.properties.getMetadata().getOrDefault(AzureResource.DIR_METADATA_MARKER, "false").equals("true"); } else { return false; } } /** * Tells whether the file is a directory. * <p> * Will return true if the directory is a concrete or virtual directory. See * {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a directory */ @Override public boolean isDirectory() { return !this.isRegularFile(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.isVirtualDirectory; } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return false; } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return false; } /** * Returns the size of the file (in bytes). * * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return resource.getBlobClient().getBlobUrl(); } }
class AzureBlobFileAttributes implements BasicFileAttributes { /* Some blob properties do not have getters as they do not make sense in the context of nio. These properties are: - incremental snapshot related properties (only for page blobs) - lease related properties (leases not currently supported) - sequence number (only for page blobs) - encryption key sha256 (cpk not supported) - committed block count (only for append blobs) */ private final ClientLogger logger = new ClientLogger(AzureBlobFileAttributes.class); private final BlobProperties properties; private final AzureResource resource; private final boolean isVirtualDirectory; AzureBlobFileAttributes(Path path) throws IOException { this.resource = new AzureResource(path); BlobProperties props = null; try { props = resource.getBlobClient().getProperties(); } catch (BlobStorageException e) { if (e.getStatusCode() == 404 && this.resource.checkVirtualDirectoryExists()) { this.isVirtualDirectory = true; this.properties = null; return; } else { throw LoggingUtility.logError(logger, new IOException("Path: " + path.toString(), e)); } } this.properties = props; this.isVirtualDirectory = false; } static Map<String, Supplier<Object>> getAttributeSuppliers(AzureBlobFileAttributes attributes) { Map<String, Supplier<Object>> map = new HashMap<>(); map.put("creationTime", attributes::creationTime); map.put("lastModifiedTime", attributes::lastModifiedTime); map.put("eTag", attributes::eTag); map.put("blobHttpHeaders", attributes::blobHttpHeaders); map.put("blobType", attributes::blobType); map.put("copyId", attributes::copyId); map.put("copyStatus", attributes::copyStatus); map.put("copySource", attributes::copySource); map.put("copyProgress", attributes::copyProgress); map.put("copyCompletionTime", attributes::copyCompletionTime); map.put("copyStatusDescription", attributes::copyStatusDescription); map.put("isServerEncrypted", attributes::isServerEncrypted); map.put("accessTier", attributes::accessTier); map.put("isAccessTierInferred", attributes::isAccessTierInferred); map.put("archiveStatus", attributes::archiveStatus); map.put("accessTierChangeTime", attributes::accessTierChangeTime); map.put("metadata", attributes::metadata); map.put("isRegularFile", attributes::isRegularFile); map.put("isDirectory", attributes::isDirectory); map.put("isVirtualDirectory", attributes::isVirtualDirectory); map.put("isSymbolicLink", attributes::isSymbolicLink); map.put("isOther", attributes::isOther); map.put("size", attributes::size); return map; } /** * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a * virtual directory. * * @return The creation time or null if this is a virtual directory */ @Override public FileTime creationTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getCreationTime().toInstant()) : null; } /** * Returns the time of last modification. Returns null if this is a virtual directory * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastModifiedTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getLastModified().toInstant()) : null; } /** * Returns the eTag of the blob or null if this is a virtual directory * * @return the eTag of the blob or null if this is a virtual directory */ public String eTag() { return !this.isVirtualDirectory ? this.properties.getETag() : null; } /** * Returns the {@link BlobHttpHeaders} of the blob or null if this is a virtual directory. 
* * @return {@link BlobHttpHeaders} or null if this is a virtual directory */ public BlobHttpHeaders blobHttpHeaders() { if (this.isVirtualDirectory) { return null; } /* We return these all as one value, so it's consistent with the way of setting, especially the setAttribute method that accepts a string argument for the name of the property. Returning them individually would mean we have to support setting them individually as well, which is not possible due to service constraints. */ return new BlobHttpHeaders() .setContentType(this.properties.getContentType()) .setContentLanguage(this.properties.getContentLanguage()) .setContentMd5(this.properties.getContentMd5()) .setContentDisposition(this.properties.getContentDisposition()) .setContentEncoding(this.properties.getContentEncoding()) .setCacheControl(this.properties.getCacheControl()); } /** * Returns the type of the blob or null if this is a virtual directory * * @return the type of the blob or null if this is a virtual directory */ public BlobType blobType() { return !this.isVirtualDirectory ? this.properties.getBlobType() : null; } /** * Returns the identifier of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the identifier of the last copy operation or null if this is a virtual directory */ public String copyId() { return !this.isVirtualDirectory ? this.properties.getCopyId() : null; } /** * Returns the status of the last copy operation. If this blob hasn't been the target of a copy operation or has * been modified since this won't be set. Returns null if this is a virtual directory * * @return the status of the last copy operation or null if this is a virtual directory */ public CopyStatusType copyStatus() { return !this.isVirtualDirectory ? this.properties.getCopyStatus() : null; } /** * Returns the source blob URL from the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory * * @return the source blob URL from the last copy operation or null if this is a virtual directory */ public String copySource() { return !this.isVirtualDirectory ? this.properties.getCopySource() : null; } /** * Returns the number of bytes copied and total bytes in the source from the last copy operation (bytes copied/total * bytes). If this blob hasn't been the target of a copy operation or has been modified since this won't be set. * Returns null if this is a virtual directory * * @return the number of bytes copied and total bytes in the source from the last copy operation null if this is a * virtual directory */ public String copyProgress() { return !this.isVirtualDirectory ? this.properties.getCopyProgress() : null; } /** * Returns the completion time of the last copy operation. If this blob hasn't been the target of a copy operation * or has been modified since this won't be set. Returns null if this is a virtual directory. * * @return the completion time of the last copy operation or null if this is a virtual directory */ public OffsetDateTime copyCompletionTime() { return !this.isVirtualDirectory ? this.properties.getCopyCompletionTime() : null; } /** * Returns the description of the last copy failure, this is set when the {@link * {@link CopyStatusType * target of a copy operation or has been modified since this won't be set. Returns null if this is a virtual * directory. 
* * @return the description of the last copy failure or null if this is a virtual directory */ public String copyStatusDescription() { return !this.isVirtualDirectory ? this.properties.getCopyStatusDescription() : null; } /** * Returns the status of the blob being encrypted on the server or null if this is a virtual directory. * * @return the status of the blob being encrypted on the server or null if this is a virtual directory */ public Boolean isServerEncrypted() { return !this.isVirtualDirectory ? this.properties.isServerEncrypted() : null; } /** * Returns the tier of the blob. This is only set for Page blobs on a premium storage account or for Block blobs on * blob storage or general purpose V2 account. Returns null if this is a virtual directory. * * @return the tier of the blob or null if this is a virtual directory */ public AccessTier accessTier() { return !this.isVirtualDirectory ? this.properties.getAccessTier() : null; } /** * Returns the status of the tier being inferred for the blob. This is only set for Page blobs on a premium storage * account or for Block blobs on blob storage or general purpose V2 account. Returns null if this is a virtual * directory. * * @return the status of the tier being inferred for the blob or null if this is a virtual directory */ public Boolean isAccessTierInferred() { return !this.isVirtualDirectory ? this.properties.isAccessTierInferred() : null; } /** * Returns the archive status of the blob. This is only for blobs on a blob storage and general purpose v2 account. * Returns null if this is a virtual directory. * * @return the archive status of the blob or null if this is a virtual directory */ public ArchiveStatus archiveStatus() { return !this.isVirtualDirectory ? this.properties.getArchiveStatus() : null; } /** * Returns the time when the access tier for the blob was last changed or null if this is a virtual directory. * * @return the time when the access tier for the blob was last changed or null if this is a virtual directory */ public OffsetDateTime accessTierChangeTime() { return !this.isVirtualDirectory ? this.properties.getAccessTierChangeTime() : null; } /** * Returns the metadata associated with this blob or null if this is a virtual directory. * * @return the metadata associated with this blob or null if this is a virtual directory */ public Map<String, String> metadata() { return !this.isVirtualDirectory ? Collections.unmodifiableMap(this.properties.getMetadata()) : null; } /** * Returns the time of last modification or null if this is a virtual directory. * <p> * Last access time is not supported by the blob service. In this case, it is typical for implementations to return * the {@link * * @return the time of last modification or null if this is a virtual directory */ @Override public FileTime lastAccessTime() { return !this.isVirtualDirectory ? FileTime.from(this.properties.getLastAccessedTime().toInstant()) : null; } /** * Tells whether the file is a regular file with opaque content. * * @return whether the file is a regular file. */ @Override public boolean isRegularFile() { return !this.isVirtualDirectory && !this.properties.getMetadata().getOrDefault(AzureResource.DIR_METADATA_MARKER, "false").equals("true"); } /** * Tells whether the file is a directory. * <p> * Will return true if the directory is a concrete or virtual directory. See * {@link AzureFileSystemProvider * concrete directories. 
* * @return whether the file is a directory */ @Override public boolean isDirectory() { return !this.isRegularFile(); } /** * Tells whether the file is a virtual directory. * <p> * See {@link AzureFileSystemProvider * concrete directories. * * @return whether the file is a virtual directory */ public boolean isVirtualDirectory() { return this.isVirtualDirectory; } /** * Tells whether the file is a symbolic link. * * @return false. Symbolic links are not supported. */ @Override public boolean isSymbolicLink() { return false; } /** * Tells whether the file is something other than a regular file, directory, or symbolic link. * * @return false. No other object types are supported. */ @Override public boolean isOther() { return false; } /** * Returns the size of the file (in bytes). * * @return the size of the file */ @Override /** * Returns the url of the resource. * * @return The file key, which is the url. */ @Override public Object fileKey() { return resource.getBlobClient().getBlobUrl(); } }
Curious why a null check is required here?
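The likely answer, sketched below under stated assumptions: `CosmosEntityInformation.getUniqueKeyPolicy()` can return null when the entity declares no unique keys, and the guard keeps that case from reaching the setter. Whether `setUniqueKeyPolicy` actually rejects a null argument is an assumption here, not verified against the SDK; the guard is defensible either way, since skipping the call leaves the container's default policy untouched:

```java
import com.azure.cosmos.models.CosmosContainerProperties;
import com.azure.cosmos.models.UniqueKeyPolicy;

final class UniqueKeyPolicySketch {
    // Hypothetical helper (the name is ours) showing the guard's rationale:
    // apply a unique key policy only when the entity actually declares one.
    // Passing null through is assumed to be rejected by the SDK setter, so
    // the guard instead leaves the property unset and the default in force.
    static void applyIfPresent(CosmosContainerProperties properties, UniqueKeyPolicy policy) {
        if (policy != null) {
            properties.setUniqueKeyPolicy(policy);
        }
    }
}
```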
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { return createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase database = cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .map(cosmosContainerResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null); return cosmosContainerResponse; }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)); }); }
cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy);
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { return createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase database = cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .map(cosmosContainerResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null); return cosmosContainerResponse; }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)); }); }
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware { private static final Logger LOGGER = LoggerFactory.getLogger(ReactiveCosmosTemplate.class); private final MappingCosmosConverter mappingCosmosConverter; private final String databaseName; private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor; private final boolean queryMetricsEnabled; private final CosmosAsyncClient cosmosAsyncClient; private final IsNewAwareAuditingHandler cosmosAuditingHandler; private final DatabaseThroughputConfig databaseThroughputConfig; private ApplicationContext applicationContext; /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} * @param cosmosAuditingHandler can be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler); } /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null); } /** * Constructor * * @param cosmosFactory the cosmos db factory * @param cosmosConfig the cosmos config * @param mappingCosmosConverter the mappingCosmosConverter * @param cosmosAuditingHandler the auditing handler */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { Assert.notNull(cosmosFactory, "CosmosFactory must not be null!"); Assert.notNull(cosmosConfig, "CosmosConfig must not be null!"); Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!"); this.mappingCosmosConverter = mappingCosmosConverter; this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient(); this.databaseName = cosmosFactory.getDatabaseName(); this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor(); this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled(); this.cosmosAuditingHandler = cosmosAuditingHandler; this.databaseThroughputConfig = cosmosConfig.getDatabaseThroughputConfig(); } /** * Initialization * * @param cosmosFactory must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null); } /** * @param applicationContext the application context * @throws BeansException the bean exception */ public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException { this.applicationContext = applicationContext; } /** * Creates a container if it doesn't already exist * * @param information the CosmosEntityInformation * @return Mono containing 
CosmosContainerResponse */ @Override private Mono<CosmosDatabaseResponse> createDatabaseIfNotExists() { if (databaseThroughputConfig == null) { return cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName); } else { ThroughputProperties throughputProperties = databaseThroughputConfig.isAutoScale() ? ThroughputProperties.createAutoscaledThroughput(databaseThroughputConfig.getRequestUnits()) : ThroughputProperties.createManualThroughput(databaseThroughputConfig.getRequestUnits()); return cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName, throughputProperties); } } @Override public Mono<CosmosContainerProperties> getContainerProperties(String containerName) { return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .read() .map(CosmosContainerResponse::getProperties); } @Override public Mono<CosmosContainerProperties> replaceContainerProperties(String containerName, CosmosContainerProperties properties) { return this.cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .replace(properties) .map(CosmosContainerResponse::getProperties); } /** * * Find all items in a given container * * @param containerName the containerName * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(String containerName, Class<T> domainType) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return find(query, domainType, containerName); } /** * Find all items in a given container * * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(Class<T> domainType) { return findAll(domainType.getSimpleName(), domainType); } @Override public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) { Assert.notNull(partitionKey, "partitionKey should not be null"); Assert.notNull(domainType, "domainType should not be null"); final String containerName = getContainerName(domainType); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setPartitionKey(partitionKey); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable, this.responseDiagnosticsProcessor)); } /** * Find by id * * @param id the id * @param domainType the domainType * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType) { Assert.notNull(domainType, "domainType should not be null"); return findById(getContainerName(domainType), id, domainType); } /** * Find by id * * @param containerName the container name * @param id the id * @param domainType the entity class * @return Mono with the item or error */ @Override public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) { 
Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final String query = "select * from root where root.id = @ROOT_ID"; final SqlParameter param = new SqlParameter("@ROOT_ID", CosmosUtils.getStringIDValue(id)); final SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(query, param); final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Mono.justOrEmpty(cosmosItemFeedResponse .getResults() .stream() .map(cosmosItem -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItem)) .findFirst()); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable, this.responseDiagnosticsProcessor)) .next(); } /** * Find by id * * @param id the id * @param domainType the entity class * @param partitionKey partition Key * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) { Assert.notNull(domainType, "domainType should not be null"); String idToFind = CosmosUtils.getStringIDValue(id); final String containerName = getContainerName(domainType); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .readItem(idToFind, partitionKey, JsonNode.class) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.justOrEmpty(emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable, this.responseDiagnosticsProcessor)); } /** * Insert * * @param <T> type of inserted objectToSave * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) { return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey); } /** * Insert * * @param objectToSave the object to save * @param <T> type of inserted objectToSave * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave) { return insert(getContainerName(objectToSave.getClass()), objectToSave, null); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(objectToSave, "objectToSave should not be null"); final Class<T> domainType = (Class<T>) objectToSave.getClass(); markAuditedIfConfigured(objectToSave); generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(objectToSave); 
final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .createItem(originalItem, partitionKey, options) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> insert(String containerName, T objectToSave) { return insert(containerName, objectToSave, null); } @SuppressWarnings("unchecked") private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) { CosmosEntityInformation<?, ?> entityInfo = CosmosEntityInformation.getInstance(type); if (entityInfo.shouldGenerateId() && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) { ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString()); } } /** * Upsert * * @param object the object to upsert * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(T object) { return upsert(getContainerName(object.getClass()), object); } /** * Upsert * * @param containerName the container name * @param object the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(String containerName, T object) { final Class<T> domainType = (Class<T>) object.getClass(); markAuditedIfConfigured(object); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(object); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(object.getClass(), originalItem, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .upsertItem(originalItem, options) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable, this.responseDiagnosticsProcessor)); } /** * Deletes the item with id and partition key. 
* * @param containerName Container name of database * @param id item id * @param partitionKey the partition key */ @Override public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) { return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions()); } private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey, CosmosItemRequestOptions cosmosItemRequestOptions) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); String idToDelete = CosmosUtils.getStringIDValue(id); if (partitionKey == null) { partitionKey = PartitionKey.NONE; } return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions) .publishOn(Schedulers.parallel()) .doOnNext(cosmosItemResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable, this.responseDiagnosticsProcessor)) .then(); } /** * Deletes the entity * * @param <T> type class of domain type * @param containerName Container name of database * @param entity the entity to delete * @return void Mono */ public <T> Mono<Void> deleteEntity(String containerName, T entity) { Assert.notNull(entity, "entity to be deleted should not be null"); @SuppressWarnings("unchecked") final Class<T> domainType = (Class<T>) entity.getClass(); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(entity.getClass(), originalItem, options); return deleteItem(originalItem, containerName, domainType).then(); } /** * Delete all items in a container * * @param containerName the container name * @param domainType the domainType * @return void Mono */ @Override public Mono<Void> deleteAll(@NonNull String containerName, @NonNull Class<?> domainType) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return this.delete(query, domainType, containerName).then(); } /** * Delete items matching query * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono */ @Override public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) { Assert.notNull(query, "DocumentQuery should not be null."); Assert.notNull(domainType, "domainType should not be null."); Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final Flux<JsonNode> results = findItems(query, containerName, domainType); return results.flatMap(d -> deleteItem(d, containerName, domainType)); } /** * Find items * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Flux with found items or error */ @Override public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) { return findItems(query, containerName, domainType) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties)); } /** * Exists * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ @Override 
public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) { return count(query, containerName).flatMap(count -> Mono.just(count > 0)); } /** * Exists * * @param id the id * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) { return findById(containerName, id, domainType) .flatMap(o -> Mono.just(o != null)); } /** * Count * * @param containerName the container name * @return Mono with the count or error */ @Override public Mono<Long> count(String containerName) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return count(query, containerName); } /** * Count * * @param query the document query * @param containerName the container name * @return Mono with count or error */ @Override public Mono<Long> count(CosmosQuery query, String containerName) { final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query); return getCountValue(querySpec, containerName); } /** * Count * * @param querySpec the document query spec * @param containerName the container name * @return Mono with count or error */ @Override public Mono<Long> count(SqlQuerySpec querySpec, String containerName) { return getCountValue(querySpec, containerName); } @Override public MappingCosmosConverter getConverter() { return mappingCosmosConverter; } @Override public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Class<?> domainType, Class<T> returnType) { return runQuery(querySpec, Sort.unsorted(), domainType, returnType); } @Override public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Sort sort, Class<?> domainType, Class<T> returnType) { SqlQuerySpec sortedQuerySpec = NativeQueryGenerator.getInstance().generateSortedQuery(querySpec, sort); return runQuery(sortedQuerySpec, domainType) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(returnType, cosmosItemProperties)); } private Flux<JsonNode> runQuery(SqlQuerySpec querySpec, Class<?> domainType) { String containerName = getContainerName(domainType); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(querySpec, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils .fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable, this.responseDiagnosticsProcessor)); } private Mono<Long> getCountValue(SqlQuerySpec querySpec, String containerName) { final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(this.queryMetricsEnabled); return executeQuery(querySpec, containerName, options) .doOnNext(feedResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, feedResponse.getCosmosDiagnostics(), feedResponse)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable, this.responseDiagnosticsProcessor)) .next() .map(r -> r.getResults().get(0).asLong()); } private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec, String containerName, CosmosQueryRequestOptions options) { 
return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, options, JsonNode.class) .byPage() .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable, this.responseDiagnosticsProcessor)); } /** * Delete container with container name * * @param containerName the container name */ @Override public void deleteContainer(@NonNull String containerName) { Assert.hasText(containerName, "containerName should have text."); cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .delete() .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable, this.responseDiagnosticsProcessor)) .block(); } /** * @param domainType the domain class * @return the container name */ public String getContainerName(Class<?> domainType) { Assert.notNull(domainType, "domainType should not be null"); return CosmosEntityInformation.getInstance(domainType).getContainerName(); } private void markAuditedIfConfigured(Object object) { if (cosmosAuditingHandler != null) { cosmosAuditingHandler.markAudited(object); } } private <T> Flux<JsonNode> findItems(@NonNull CosmosQuery query, @NonNull String containerName, @NonNull Class<T> domainType) { final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); Optional<Object> partitionKeyValue = query.getPartitionKeyValue(domainType); partitionKeyValue.ifPresent(o -> { LOGGER.debug("Setting partition key {}", o); cosmosQueryRequestOptions.setPartitionKey(new PartitionKey(o)); }); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable, this.responseDiagnosticsProcessor)); } private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode, String containerName, @NonNull Class<T> domainType) { final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(domainType, jsonNode, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(jsonNode, options) .publishOn(Schedulers.parallel()) .map(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return cosmosItemResponse; }) .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode))) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable, this.responseDiagnosticsProcessor)); } private <T> T emitOnLoadEventAndConvertToDomainObject(@NonNull Class<T> domainType, JsonNode responseJsonNode) { CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType); 
maybeEmitEvent(new AfterLoadEvent<>(responseJsonNode, domainType, entityInformation.getContainerName())); return toDomainObject(domainType, responseJsonNode); } private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) { return mappingCosmosConverter.read(domainType, jsonNode); } private void applyVersioning(Class<?> domainType, JsonNode jsonNode, CosmosItemRequestOptions options) { CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType); if (entityInformation.isVersioned()) { options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText()); } } private void maybeEmitEvent(CosmosMappingEvent<?> event) { if (canPublishEvent()) { this.applicationContext.publishEvent(event); } } private boolean canPublishEvent() { return this.applicationContext != null; } }
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware { private static final Logger LOGGER = LoggerFactory.getLogger(ReactiveCosmosTemplate.class); private final MappingCosmosConverter mappingCosmosConverter; private final String databaseName; private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor; private final boolean queryMetricsEnabled; private final CosmosAsyncClient cosmosAsyncClient; private final IsNewAwareAuditingHandler cosmosAuditingHandler; private final DatabaseThroughputConfig databaseThroughputConfig; private ApplicationContext applicationContext; /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} * @param cosmosAuditingHandler can be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler); } /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null); } /** * Constructor * * @param cosmosFactory the cosmos db factory * @param cosmosConfig the cosmos config * @param mappingCosmosConverter the mappingCosmosConverter * @param cosmosAuditingHandler the auditing handler */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { Assert.notNull(cosmosFactory, "CosmosFactory must not be null!"); Assert.notNull(cosmosConfig, "CosmosConfig must not be null!"); Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!"); this.mappingCosmosConverter = mappingCosmosConverter; this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient(); this.databaseName = cosmosFactory.getDatabaseName(); this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor(); this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled(); this.cosmosAuditingHandler = cosmosAuditingHandler; this.databaseThroughputConfig = cosmosConfig.getDatabaseThroughputConfig(); } /** * Initialization * * @param cosmosFactory must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null); } /** * @param applicationContext the application context * @throws BeansException the bean exception */ public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException { this.applicationContext = applicationContext; } /** * Creates a container if it doesn't already exist * * @param information the CosmosEntityInformation * @return Mono containing 
CosmosContainerResponse */ @Override private Mono<CosmosDatabaseResponse> createDatabaseIfNotExists() { if (databaseThroughputConfig == null) { return cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName); } else { ThroughputProperties throughputProperties = databaseThroughputConfig.isAutoScale() ? ThroughputProperties.createAutoscaledThroughput(databaseThroughputConfig.getRequestUnits()) : ThroughputProperties.createManualThroughput(databaseThroughputConfig.getRequestUnits()); return cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName, throughputProperties); } } @Override public Mono<CosmosContainerProperties> getContainerProperties(String containerName) { return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .read() .map(CosmosContainerResponse::getProperties); } @Override public Mono<CosmosContainerProperties> replaceContainerProperties(String containerName, CosmosContainerProperties properties) { return this.cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .replace(properties) .map(CosmosContainerResponse::getProperties); } /** * * Find all items in a given container * * @param containerName the containerName * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(String containerName, Class<T> domainType) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return find(query, domainType, containerName); } /** * Find all items in a given container * * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(Class<T> domainType) { return findAll(domainType.getSimpleName(), domainType); } @Override public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) { Assert.notNull(partitionKey, "partitionKey should not be null"); Assert.notNull(domainType, "domainType should not be null"); final String containerName = getContainerName(domainType); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setPartitionKey(partitionKey); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable, this.responseDiagnosticsProcessor)); } /** * Find by id * * @param id the id * @param domainType the domainType * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType) { Assert.notNull(domainType, "domainType should not be null"); return findById(getContainerName(domainType), id, domainType); } /** * Find by id * * @param containerName the container name * @param id the id * @param domainType the entity class * @return Mono with the item or error */ @Override public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) { 
Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final String query = "select * from root where root.id = @ROOT_ID"; final SqlParameter param = new SqlParameter("@ROOT_ID", CosmosUtils.getStringIDValue(id)); final SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(query, param); final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Mono.justOrEmpty(cosmosItemFeedResponse .getResults() .stream() .map(cosmosItem -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItem)) .findFirst()); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable, this.responseDiagnosticsProcessor)) .next(); } /** * Find by id * * @param id the id * @param domainType the entity class * @param partitionKey partition Key * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) { Assert.notNull(domainType, "domainType should not be null"); String idToFind = CosmosUtils.getStringIDValue(id); final String containerName = getContainerName(domainType); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .readItem(idToFind, partitionKey, JsonNode.class) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.justOrEmpty(emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable, this.responseDiagnosticsProcessor)); } /** * Insert * * @param <T> type of inserted objectToSave * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) { return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey); } /** * Insert * * @param objectToSave the object to save * @param <T> type of inserted objectToSave * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave) { return insert(getContainerName(objectToSave.getClass()), objectToSave, null); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(objectToSave, "objectToSave should not be null"); final Class<T> domainType = (Class<T>) objectToSave.getClass(); markAuditedIfConfigured(objectToSave); generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(objectToSave); 
final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .createItem(originalItem, partitionKey, options) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> insert(String containerName, T objectToSave) { return insert(containerName, objectToSave, null); } @SuppressWarnings("unchecked") private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) { CosmosEntityInformation<?, ?> entityInfo = CosmosEntityInformation.getInstance(type); if (entityInfo.shouldGenerateId() && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) { ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString()); } } /** * Upsert * * @param object the object to upsert * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(T object) { return upsert(getContainerName(object.getClass()), object); } /** * Upsert * * @param containerName the container name * @param object the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(String containerName, T object) { final Class<T> domainType = (Class<T>) object.getClass(); markAuditedIfConfigured(object); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(object); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(object.getClass(), originalItem, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .upsertItem(originalItem, options) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable, this.responseDiagnosticsProcessor)); } /** * Deletes the item with id and partition key. 
* * @param containerName Container name of database * @param id item id * @param partitionKey the partition key */ @Override public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) { return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions()); } private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey, CosmosItemRequestOptions cosmosItemRequestOptions) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); String idToDelete = CosmosUtils.getStringIDValue(id); if (partitionKey == null) { partitionKey = PartitionKey.NONE; } return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions) .publishOn(Schedulers.parallel()) .doOnNext(cosmosItemResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable, this.responseDiagnosticsProcessor)) .then(); } /** * Deletes the entity * * @param <T> type class of domain type * @param containerName Container name of database * @param entity the entity to delete * @return void Mono */ public <T> Mono<Void> deleteEntity(String containerName, T entity) { Assert.notNull(entity, "entity to be deleted should not be null"); @SuppressWarnings("unchecked") final Class<T> domainType = (Class<T>) entity.getClass(); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(entity.getClass(), originalItem, options); return deleteItem(originalItem, containerName, domainType).then(); } /** * Delete all items in a container * * @param containerName the container name * @param domainType the domainType * @return void Mono */ @Override public Mono<Void> deleteAll(@NonNull String containerName, @NonNull Class<?> domainType) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return this.delete(query, domainType, containerName).then(); } /** * Delete items matching query * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono */ @Override public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) { Assert.notNull(query, "DocumentQuery should not be null."); Assert.notNull(domainType, "domainType should not be null."); Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final Flux<JsonNode> results = findItems(query, containerName, domainType); return results.flatMap(d -> deleteItem(d, containerName, domainType)); } /** * Find items * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Flux with found items or error */ @Override public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) { return findItems(query, containerName, domainType) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(domainType, cosmosItemProperties)); } /** * Exists * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ @Override 
public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) { return count(query, containerName).flatMap(count -> Mono.just(count > 0)); } /** * Exists * * @param id the id * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) { return findById(containerName, id, domainType) .flatMap(o -> Mono.just(o != null)); } /** * Count * * @param containerName the container name * @return Mono with the count or error */ @Override public Mono<Long> count(String containerName) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return count(query, containerName); } /** * Count * * @param query the document query * @param containerName the container name * @return Mono with count or error */ @Override public Mono<Long> count(CosmosQuery query, String containerName) { final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query); return getCountValue(querySpec, containerName); } /** * Count * * @param querySpec the document query spec * @param containerName the container name * @return Mono with count or error */ @Override public Mono<Long> count(SqlQuerySpec querySpec, String containerName) { return getCountValue(querySpec, containerName); } @Override public MappingCosmosConverter getConverter() { return mappingCosmosConverter; } @Override public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Class<?> domainType, Class<T> returnType) { return runQuery(querySpec, Sort.unsorted(), domainType, returnType); } @Override public <T> Flux<T> runQuery(SqlQuerySpec querySpec, Sort sort, Class<?> domainType, Class<T> returnType) { SqlQuerySpec sortedQuerySpec = NativeQueryGenerator.getInstance().generateSortedQuery(querySpec, sort); return runQuery(sortedQuerySpec, domainType) .map(cosmosItemProperties -> emitOnLoadEventAndConvertToDomainObject(returnType, cosmosItemProperties)); } private Flux<JsonNode> runQuery(SqlQuerySpec querySpec, Class<?> domainType) { String containerName = getContainerName(domainType); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(querySpec, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils .fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable, this.responseDiagnosticsProcessor)); } private Mono<Long> getCountValue(SqlQuerySpec querySpec, String containerName) { final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(this.queryMetricsEnabled); return executeQuery(querySpec, containerName, options) .doOnNext(feedResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, feedResponse.getCosmosDiagnostics(), feedResponse)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable, this.responseDiagnosticsProcessor)) .next() .map(r -> r.getResults().get(0).asLong()); } private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec, String containerName, CosmosQueryRequestOptions options) { 
return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, options, JsonNode.class) .byPage() .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable, this.responseDiagnosticsProcessor)); } /** * Delete container with container name * * @param containerName the container name */ @Override public void deleteContainer(@NonNull String containerName) { Assert.hasText(containerName, "containerName should have text."); cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .delete() .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable, this.responseDiagnosticsProcessor)) .block(); } /** * @param domainType the domain class * @return the container name */ public String getContainerName(Class<?> domainType) { Assert.notNull(domainType, "domainType should not be null"); return CosmosEntityInformation.getInstance(domainType).getContainerName(); } private void markAuditedIfConfigured(Object object) { if (cosmosAuditingHandler != null) { cosmosAuditingHandler.markAudited(object); } } private <T> Flux<JsonNode> findItems(@NonNull CosmosQuery query, @NonNull String containerName, @NonNull Class<T> domainType) { final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); Optional<Object> partitionKeyValue = query.getPartitionKeyValue(domainType); partitionKeyValue.ifPresent(o -> { LOGGER.debug("Setting partition key {}", o); cosmosQueryRequestOptions.setPartitionKey(new PartitionKey(o)); }); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable, this.responseDiagnosticsProcessor)); } private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode, String containerName, @NonNull Class<T> domainType) { final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(domainType, jsonNode, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(jsonNode, options) .publishOn(Schedulers.parallel()) .map(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return cosmosItemResponse; }) .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode))) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable, this.responseDiagnosticsProcessor)); } private <T> T emitOnLoadEventAndConvertToDomainObject(@NonNull Class<T> domainType, JsonNode responseJsonNode) { CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType); 
maybeEmitEvent(new AfterLoadEvent<>(responseJsonNode, domainType, entityInformation.getContainerName())); return toDomainObject(domainType, responseJsonNode); } private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) { return mappingCosmosConverter.read(domainType, jsonNode); } private void applyVersioning(Class<?> domainType, JsonNode jsonNode, CosmosItemRequestOptions options) { CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType); if (entityInformation.isVersioned()) { options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText()); } } private void maybeEmitEvent(CosmosMappingEvent<?> event) { if (canPublishEvent()) { this.applicationContext.publishEvent(event); } } private boolean canPublishEvent() { return this.applicationContext != null; } }
What if the customer has already defined a unique key policy on the container? Will this override what is already defined? (Just thinking about whether it will cause the same problem that happened with the index policy some time back: https://github.com/Azure/azure-sdk-for-java/issues/20330)
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy);
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = createDatabaseIfNotExists() .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable, this.responseDiagnosticsProcessor)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); final UniqueKeyPolicy uniqueKeyPolicy = information.getUniqueKeyPolicy(); if (uniqueKeyPolicy != null) { cosmosContainerProperties.setUniqueKeyPolicy(uniqueKeyPolicy); } CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = information.isAutoScale() ? ThroughputProperties.createAutoscaledThroughput(information.getRequestUnit()) : ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable, this.responseDiagnosticsProcessor)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
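For the question in the review comment above: `createContainerIfNotExists` only applies the supplied `CosmosContainerProperties` when the container is actually created; for an existing container the call returns the server-side properties unchanged, so a pre-existing unique key policy should not be silently replaced the way an index policy would be by an explicit `replace` call. A minimal defensive sketch under that assumption — the helper name, the `database` handle, and the coarse size comparison are all illustrative, not the library's API:
```java
import com.azure.cosmos.CosmosAsyncDatabase;
import com.azure.cosmos.models.CosmosContainerResponse;
import com.azure.cosmos.models.UniqueKeyPolicy;
import com.azure.spring.data.cosmos.repository.support.CosmosEntityInformation;
import org.slf4j.Logger;

// Sketch: surface (rather than silently ignore) a divergence between the
// entity-declared unique key policy and what already exists on the server.
private void warnOnUniqueKeyPolicyDrift(CosmosAsyncDatabase database,
                                        CosmosEntityInformation<?, ?> information,
                                        Logger logger) {
    database.getContainer(information.getContainerName())
        .read()
        .map(CosmosContainerResponse::getProperties)
        .doOnNext(existing -> {
            UniqueKeyPolicy declared = information.getUniqueKeyPolicy();
            UniqueKeyPolicy current = existing.getUniqueKeyPolicy();
            // createContainerIfNotExists never updates an existing container,
            // so a differing declared policy is ignored, not applied.
            if (declared != null && current != null
                    && declared.getUniqueKeys().size() != current.getUniqueKeys().size()) {
                logger.warn("Entity-declared unique key policy differs from the existing container policy and will not be applied.");
            }
        })
        .subscribe();
}
```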
I think this can be the value property of an enum, similar to KeyEncryptionKeyResolverName.
private static String getNameForKeyEncryptionKeyAlgorithm(KeyEncryptionKeyAlgorithm keyEncryptionKeyAlgorithm) { if(keyEncryptionKeyAlgorithm == KeyEncryptionKeyAlgorithm.RSA_OAEP) { return "RSA-OAEP"; } throw new IllegalArgumentException(String.format("Unexpected algorithm '%s'", keyEncryptionKeyAlgorithm)); }
return "RSA-OAEP";
private static String getNameForKeyEncryptionKeyAlgorithm(KeyEncryptionKeyAlgorithm keyEncryptionKeyAlgorithm) { if(keyEncryptionKeyAlgorithm == KeyEncryptionKeyAlgorithm.RSA_OAEP) { return "RSA-OAEP"; } throw new IllegalArgumentException(String.format("Unexpected algorithm '%s'", keyEncryptionKeyAlgorithm)); }
class EncryptionKeyStoreProviderImpl extends EncryptionKeyStoreProvider { private final KeyEncryptionKeyResolver keyEncryptionKeyResolver; private final String keyEncryptionKeyProviderName; public EncryptionKeyStoreProviderImpl(KeyEncryptionKeyResolver keyEncryptionKeyResolver, String keyEncryptionKeyProviderName) { this.keyEncryptionKeyResolver = keyEncryptionKeyResolver; this.keyEncryptionKeyProviderName = keyEncryptionKeyProviderName; } /** * Getter for provider name. * * @return provider name */ @Override public String getProviderName() { return this.keyEncryptionKeyProviderName; } /** * Unwraps the specified encryptedKey of a data encryption key. The encrypted value is expected to be encrypted * using the key encryption key with the specified encryptionKeyId and using the specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param encryptedKey * The ciphertext key. * @return The unwrapped data encryption key. */ @Override public byte[] unwrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] encryptedKey) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).unwrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), encryptedKey); } /** * Wraps a data encryption key using the key encryption key with the specified encryptionKeyId and using the * specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param key * The plaintext key * @return The wrapped data encryption key. */ @Override public byte[] wrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] key) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).wrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), key); } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @return The signature of the key encryption key metadata. */ @Override public byte[] sign(String encryptionKeyId, boolean allowEnclaveComputations) { return new byte[0]; } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @param signature * The signature of the key encryption key metadata. * @return true if matching, false if not. * @throws MicrosoftDataEncryptionException * on error */ @Override public boolean verify(String encryptionKeyId, boolean allowEnclaveComputations, byte[] signature) throws MicrosoftDataEncryptionException { throw new MicrosoftDataEncryptionException("The Verify operation is not supported. "); } }
class EncryptionKeyStoreProviderImpl extends EncryptionKeyStoreProvider { private final KeyEncryptionKeyResolver keyEncryptionKeyResolver; private final String keyEncryptionKeyProviderName; public EncryptionKeyStoreProviderImpl(KeyEncryptionKeyResolver keyEncryptionKeyResolver, String keyEncryptionKeyProviderName) { this.keyEncryptionKeyResolver = keyEncryptionKeyResolver; this.keyEncryptionKeyProviderName = keyEncryptionKeyProviderName; } /** * Getter for provider name. * * @return provider name */ @Override public String getProviderName() { return this.keyEncryptionKeyProviderName; } /** * Unwraps the specified encryptedKey of a data encryption key. The encrypted value is expected to be encrypted * using the key encryption key with the specified encryptionKeyId and using the specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param encryptedKey * The ciphertext key. * @return The unwrapped data encryption key. */ @Override public byte[] unwrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] encryptedKey) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).unwrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), encryptedKey); } /** * Wraps a data encryption key using the key encryption key with the specified encryptionKeyId and using the * specified algorithm. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param algorithm * The encryption algorithm. * @param key * The plaintext key * @return The wrapped data encryption key. */ @Override public byte[] wrapKey(String encryptionKeyId, KeyEncryptionKeyAlgorithm algorithm, byte[] key) { return this.keyEncryptionKeyResolver.buildKeyEncryptionKey(encryptionKeyId).wrapKey(getNameForKeyEncryptionKeyAlgorithm(algorithm), key); } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @return The signature of the key encryption key metadata. */ @Override public byte[] sign(String encryptionKeyId, boolean allowEnclaveComputations) { return new byte[0]; } /** * The public facing Cosmos Encryption library interface does not expose this method, hence not supported. * * @param encryptionKeyId * The key Id tells the provider where to find the key. * @param allowEnclaveComputations * Indicates whether the key encryption key supports enclave computations. * @param signature * The signature of the key encryption key metadata. * @return true if matching, false if not. * @throws MicrosoftDataEncryptionException * on error */ @Override public boolean verify(String encryptionKeyId, boolean allowEnclaveComputations, byte[] signature) throws MicrosoftDataEncryptionException { throw new MicrosoftDataEncryptionException("The Verify operation is not supported. "); } }
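A minimal sketch of what the review comment above suggests: replace the hard-coded `"RSA-OAEP"` string with an enum that carries its wire-format name as a value property, mirroring how `KeyEncryptionKeyResolverName` works. The enum name and shape here are assumptions, not the shipped API:
```java
// Hypothetical enum; the single constant mirrors the one algorithm the
// current if-branch in getNameForKeyEncryptionKeyAlgorithm supports.
public enum KeyEncryptionKeyAlgorithmName {
    RSA_OAEP("RSA-OAEP");

    private final String value;

    KeyEncryptionKeyAlgorithmName(String value) {
        this.value = value;
    }

    /**
     * @return the algorithm name passed to wrapKey/unwrapKey on the wire
     */
    public String getValue() {
        return this.value;
    }
}
```
With that in place, `getNameForKeyEncryptionKeyAlgorithm` collapses to a lookup such as `KeyEncryptionKeyAlgorithmName.RSA_OAEP.getValue()`, and supporting a new algorithm no longer requires another if-branch.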
Would `Mono.just` work here? ```suggestion return Mono.just(new AccessToken(encodedCredential, OffsetDateTime.MAX)); ```
public Mono<AccessToken> getToken(TokenRequestContext request) { return Mono.fromCallable(() -> new AccessToken(encodedCredential, OffsetDateTime.MAX)); }
return Mono.fromCallable(() -> new AccessToken(encodedCredential, OffsetDateTime.MAX));
public Mono<AccessToken> getToken(TokenRequestContext request) { return Mono.fromCallable(() -> new AccessToken(encodedCredential, OffsetDateTime.MAX)); }
class BasicAuthenticationCredential implements TokenCredential { /** * Base64 encoded username-password credential. */ private final String encodedCredential; /** * Creates a basic authentication credential. * * @param username basic auth user name * @param password basic auth password */ public BasicAuthenticationCredential(String username, String password) { String credential = username + ":" + password; this.encodedCredential = Base64Util.encodeToString(credential.getBytes(StandardCharsets.UTF_8)); } /** * @throws RuntimeException If the UTF-8 encoding isn't supported. */ @Override }
class BasicAuthenticationCredential implements TokenCredential { /** * Base64 encoded username-password credential. */ private final String encodedCredential; /** * Creates a basic authentication credential. * * @param username basic auth user name * @param password basic auth password */ public BasicAuthenticationCredential(String username, String password) { String credential = username + ":" + password; this.encodedCredential = Base64Util.encodeToString(credential.getBytes(StandardCharsets.UTF_8)); } /** * @throws RuntimeException If the UTF-8 encoding isn't supported. */ @Override }
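On the `Mono.just` question above: it would work here, because `encodedCredential` is a precomputed final field, so there is no side effect or late binding to defer. The difference only matters when the value is produced by a computation. A small self-contained illustration (the class and method names are made up for the demo):
```java
import reactor.core.publisher.Mono;

public class EagerVsLazyDemo {
    public static void main(String[] args) {
        // Mono.just evaluates its argument once, at assembly time -- fine for
        // an already-computed value such as encodedCredential.
        Mono<String> eager = Mono.just(compute("eager"));

        // Mono.fromCallable defers compute() until subscription and re-runs
        // it for each subscriber -- needed only when the value is late-bound
        // or the computation has side effects.
        Mono<String> lazy = Mono.fromCallable(() -> compute("lazy"));

        System.out.println("assembled: only 'eager' has been computed so far");
        eager.subscribe(System.out::println);
        lazy.subscribe(System.out::println);
    }

    private static String compute(String label) {
        System.out.println("computing " + label);
        return label + "-token";
    }
}
```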
Do we need to give the user bean a specific name and then check that bean name, to make sure the single surviving bean comes from the user configuration instead of ours?
void userDefinedEventHubsClientBuilderProvidedShouldNotConfigureTheAuto() { this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean(EventHubClientBuilder.class, EventHubClientBuilder::new) .run(context -> assertThat(context).hasSingleBean(EventHubClientBuilder.class)); }
.run(context -> assertThat(context).hasSingleBean(EventHubClientBuilder.class));
void userDefinedEventHubsClientBuilderProvidedShouldNotConfigureTheAuto() { this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("user-defined-builder", EventHubClientBuilder.class, EventHubClientBuilder::new) .run(context -> { assertThat(context).hasSingleBean(EventHubClientBuilder.class); assertThat(context).hasBean("user-defined-builder"); }); }
class AzureEventHubsClientBuilderConfigurationTests { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureEventHubsClientBuilderConfiguration.class)); @Test void noConnectionInfoProvidedShouldNotConfigure() { contextRunner.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubsClientBuilderConfiguration.class)); } @Test @SuppressWarnings("rawtypes") void connectionStringProvidedShouldConfigure() { contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .run(context -> { assertThat(context).hasSingleBean(AzureEventHubsClientBuilderConfiguration.class); assertThat(context).hasSingleBean(EventHubClientBuilderFactory.class); assertThat(context).hasSingleBean(EventHubClientBuilder.class); }); } @Test void customizerShouldBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } @Test private static class EventHubBuilderCustomizer extends TestBuilderCustomizer<EventHubClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
class AzureEventHubsClientBuilderConfigurationTests { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureEventHubsClientBuilderConfiguration.class)); @Test void noConnectionInfoProvidedShouldNotConfigure() { contextRunner.run(context -> assertThat(context).doesNotHaveBean(AzureEventHubsClientBuilderConfiguration.class)); } @Test @SuppressWarnings("rawtypes") void connectionStringProvidedShouldConfigure() { contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .run(context -> { assertThat(context).hasSingleBean(AzureEventHubsClientBuilderConfiguration.class); assertThat(context).hasSingleBean(EventHubClientBuilderFactory.class); assertThat(context).hasSingleBean(EventHubClientBuilder.class); }); } @Test void customizerShouldBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { EventHubBuilderCustomizer customizer = new EventHubBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues( "spring.cloud.azure.eventhubs.connection-string=" + String.format(CONNECTION_STRING_FORMAT, "test-namespace"), "spring.cloud.azure.eventhubs.event-hub-name=test-event-hub" ) .withUserConfiguration(AzureEventHubsPropertiesTestConfiguration.class) .withBean("customizer1", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer2", EventHubBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } @Test private static class EventHubBuilderCustomizer extends TestBuilderCustomizer<EventHubClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
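The fix in this record names the user-supplied bean so the test can prove the surviving bean is the user's rather than the auto-configured one. A minimal, self-contained sketch of that pattern (the `ClientBuilder` stub, the configuration class, and the bean name below are illustrative stand-ins, not types from the SDK):

```java
import org.junit.jupiter.api.Test;
import org.springframework.boot.autoconfigure.AutoConfigurations;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import static org.assertj.core.api.Assertions.assertThat;

class BackOffOnUserBeanTest {

    // Minimal stand-ins for the real builder type and its auto-configuration.
    static class ClientBuilder { }

    @Configuration(proxyBeanMethods = false)
    static class ClientBuilderAutoConfiguration {
        @Bean
        @ConditionalOnMissingBean // back off when the user supplies a builder
        ClientBuilder clientBuilder() {
            return new ClientBuilder();
        }
    }

    @Test
    void autoConfigurationBacksOffForUserBean() {
        new ApplicationContextRunner()
            .withConfiguration(AutoConfigurations.of(ClientBuilderAutoConfiguration.class))
            // Register the user bean under an explicit name...
            .withBean("user-defined-builder", ClientBuilder.class, ClientBuilder::new)
            .run(context -> {
                // ...so asserting on that name proves the single surviving bean
                // is the user's, not one created by the auto-configuration.
                assertThat(context).hasSingleBean(ClientBuilder.class);
                assertThat(context).hasBean("user-defined-builder");
            });
    }
}
```

Asserting `hasBean("user-defined-builder")` alongside `hasSingleBean` is what distinguishes "the auto-configuration backed off" from "the auto-configuration replaced the user bean".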
We created the key vault, which makes us the owner. Why do we reassign the role here?
public void canCreateVirtualMachineWithDiskEncryptionSet() { String clientId = this.clientIdFromFile(); String vaultName = generateRandomResourceName("kv", 8); Vault vault = azureResourceManager.vaults().define(vaultName) .withRegion(region) .withNewResourceGroup(rgName) .withRoleBasedAccessControl() .withPurgeProtectionEnabled() .create(); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forServicePrincipal(clientId) .withBuiltInRole(BuiltInRole.KEY_VAULT_ADMINISTRATOR) .withResourceScope(vault) .create(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Key key = vault.keys().define("key1") .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); DiskEncryptionSetInner diskEncryptionSet = azureResourceManager.disks().manager().serviceClient() .getDiskEncryptionSets().createOrUpdate(rgName, "des1", new DiskEncryptionSetInner() .withLocation(region.name()) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .withIdentity(new EncryptionSetIdentity().withType(DiskEncryptionSetIdentityType.SYSTEM_ASSIGNED)) .withActiveKey(new KeyForDiskEncryptionSet() .withSourceVault(new SourceVault().withId(vault.id())) .withKeyUrl(key.id()))); DiskEncryptionSetInner diskEncryptionSet2 = azureResourceManager.disks().manager().serviceClient() .getDiskEncryptionSets().createOrUpdate(rgName, "des2", new DiskEncryptionSetInner() .withLocation(region.name()) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withIdentity(new EncryptionSetIdentity().withType(DiskEncryptionSetIdentityType.SYSTEM_ASSIGNED)) .withActiveKey(new KeyForDiskEncryptionSet() .withSourceVault(new SourceVault().withId(vault.id())) .withKeyUrl(key.id()))); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forObjectId(diskEncryptionSet.identity().principalId()) .withBuiltInRole(BuiltInRole.KEY_VAULT_CRYPTO_SERVICE_ENCRYPTION_USER) .withResourceScope(vault) .create(); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forObjectId(diskEncryptionSet2.identity().principalId()) .withBuiltInRole(BuiltInRole.KEY_VAULT_CRYPTO_SERVICE_ENCRYPTION_USER) .withResourceScope(vault) .create(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Disk disk1 = azureResourceManager.disks().define("disk1") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(32) .withDiskEncryptionSet(diskEncryptionSet.id()) .create(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk1.encryption().type()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), disk1.encryption().diskEncryptionSetId().toLowerCase(Locale.ROOT)); VirtualMachine vm = azureResourceManager.virtualMachines().define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/27") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(16, 0, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DETACH) .withDiskEncryptionSet(null)) .withExistingDataDisk(disk1) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withDataDiskDefaultDiskEncryptionSet(diskEncryptionSet.id()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withOSDiskDiskEncryptionSet(diskEncryptionSet.id()) 
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(diskEncryptionSet.id(), vm.osDiskDiskEncryptionSetId()); Assertions.assertNull(vm.dataDisks().get(0).diskEncryptionSetId()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(1).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(1).deleteOptions()); Disk disk2 = azureResourceManager.disks().define("disk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(32) .create(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, disk2.encryption().type()); Assertions.assertNull(disk2.encryption().diskEncryptionSetId()); disk2.update() .withDiskEncryptionSet(diskEncryptionSet.id(), EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .apply(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk2.encryption().type()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), disk2.encryption().diskEncryptionSetId().toLowerCase(Locale.ROOT)); vm.update() .withoutDataDisk(0) .withoutDataDisk(1) .withExistingDataDisk(disk2, 32, 2, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DELETE)) .withNewDataDisk(16, 3, CachingTypes.NONE) .withDataDiskDefaultDeleteOptions(DeleteOptions.DETACH) .apply(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(2).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertNull(vm.dataDisks().get(3).diskEncryptionSetId()); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(2).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(3).deleteOptions()); vm.deallocate(); Disk disk = azureResourceManager.disks().getById(vm.dataDisks().get(3).id()); disk.update() .withDiskEncryptionSet(diskEncryptionSet.id(), EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .apply(); vm.start(); vm.refresh(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(3).diskEncryptionSetId().toLowerCase(Locale.ROOT)); vm.update() .withoutDataDisk(2) .withoutDataDisk(3) .withNewDataDisk(16, 0, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DELETE) .withDiskEncryptionSet(diskEncryptionSet.id())) .withNewDataDisk(32, 1, CachingTypes.NONE) .withDataDiskDefaultDiskEncryptionSet(diskEncryptionSet2.id()) .apply(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(0).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(diskEncryptionSet2.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(1).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(1).deleteOptions()); disk = azureResourceManager.disks().getById(vm.dataDisks().get(0).id()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk.encryption().type()); disk = azureResourceManager.disks().getById(vm.dataDisks().get(1).id()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, disk.encryption().type()); azureResourceManager.virtualMachines().deleteById(vm.id()); }
.withBuiltInRole(BuiltInRole.KEY_VAULT_ADMINISTRATOR)
public void canCreateVirtualMachineWithDiskEncryptionSet() { String clientId = this.clientIdFromFile(); String vaultName = generateRandomResourceName("kv", 8); Vault vault = azureResourceManager.vaults().define(vaultName) .withRegion(region) .withNewResourceGroup(rgName) .withRoleBasedAccessControl() .withPurgeProtectionEnabled() .create(); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forServicePrincipal(clientId) .withBuiltInRole(BuiltInRole.KEY_VAULT_ADMINISTRATOR) .withResourceScope(vault) .create(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Key key = vault.keys().define("key1") .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); DiskEncryptionSetInner diskEncryptionSet = azureResourceManager.disks().manager().serviceClient() .getDiskEncryptionSets().createOrUpdate(rgName, "des1", new DiskEncryptionSetInner() .withLocation(region.name()) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .withIdentity(new EncryptionSetIdentity().withType(DiskEncryptionSetIdentityType.SYSTEM_ASSIGNED)) .withActiveKey(new KeyForDiskEncryptionSet() .withSourceVault(new SourceVault().withId(vault.id())) .withKeyUrl(key.id()))); DiskEncryptionSetInner diskEncryptionSet2 = azureResourceManager.disks().manager().serviceClient() .getDiskEncryptionSets().createOrUpdate(rgName, "des2", new DiskEncryptionSetInner() .withLocation(region.name()) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withIdentity(new EncryptionSetIdentity().withType(DiskEncryptionSetIdentityType.SYSTEM_ASSIGNED)) .withActiveKey(new KeyForDiskEncryptionSet() .withSourceVault(new SourceVault().withId(vault.id())) .withKeyUrl(key.id()))); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forObjectId(diskEncryptionSet.identity().principalId()) .withBuiltInRole(BuiltInRole.KEY_VAULT_CRYPTO_SERVICE_ENCRYPTION_USER) .withResourceScope(vault) .create(); azureResourceManager.accessManagement().roleAssignments().define(UUID.randomUUID().toString()) .forObjectId(diskEncryptionSet2.identity().principalId()) .withBuiltInRole(BuiltInRole.KEY_VAULT_CRYPTO_SERVICE_ENCRYPTION_USER) .withResourceScope(vault) .create(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Disk disk1 = azureResourceManager.disks().define("disk1") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(32) .withDiskEncryptionSet(diskEncryptionSet.id()) .create(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk1.encryption().type()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), disk1.encryption().diskEncryptionSetId().toLowerCase(Locale.ROOT)); VirtualMachine vm = azureResourceManager.virtualMachines().define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/27") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(16, 0, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DETACH) .withDiskEncryptionSet(null)) .withExistingDataDisk(disk1) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withDataDiskDefaultDiskEncryptionSet(diskEncryptionSet.id()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withOSDiskDiskEncryptionSet(diskEncryptionSet.id()) 
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(diskEncryptionSet.id(), vm.osDiskDiskEncryptionSetId()); Assertions.assertNull(vm.dataDisks().get(0).diskEncryptionSetId()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(1).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(1).deleteOptions()); Disk disk2 = azureResourceManager.disks().define("disk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(32) .create(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, disk2.encryption().type()); Assertions.assertNull(disk2.encryption().diskEncryptionSetId()); disk2.update() .withDiskEncryptionSet(diskEncryptionSet.id(), EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .apply(); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk2.encryption().type()); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), disk2.encryption().diskEncryptionSetId().toLowerCase(Locale.ROOT)); vm.update() .withoutDataDisk(0) .withoutDataDisk(1) .withExistingDataDisk(disk2, 32, 2, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DELETE)) .withNewDataDisk(16, 3, CachingTypes.NONE) .withDataDiskDefaultDeleteOptions(DeleteOptions.DETACH) .apply(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(2).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertNull(vm.dataDisks().get(3).diskEncryptionSetId()); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(2).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(3).deleteOptions()); vm.deallocate(); Disk disk = azureResourceManager.disks().getById(vm.dataDisks().get(3).id()); disk.update() .withDiskEncryptionSet(diskEncryptionSet.id(), EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS) .apply(); vm.start(); vm.refresh(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(3).diskEncryptionSetId().toLowerCase(Locale.ROOT)); vm.update() .withoutDataDisk(2) .withoutDataDisk(3) .withNewDataDisk(16, 0, new VirtualMachineDiskOptions() .withDeleteOptions(DeleteOptions.DELETE) .withDiskEncryptionSet(diskEncryptionSet.id())) .withNewDataDisk(32, 1, CachingTypes.NONE) .withDataDiskDefaultDiskEncryptionSet(diskEncryptionSet2.id()) .apply(); Assertions.assertEquals(diskEncryptionSet.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(0).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(diskEncryptionSet2.id().toLowerCase(Locale.ROOT), vm.dataDisks().get(1).diskEncryptionSetId().toLowerCase(Locale.ROOT)); Assertions.assertEquals(DeleteOptions.DELETE, vm.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.dataDisks().get(1).deleteOptions()); disk = azureResourceManager.disks().getById(vm.dataDisks().get(0).id()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS, disk.encryption().type()); disk = azureResourceManager.disks().getById(vm.dataDisks().get(1).id()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, disk.encryption().type()); azureResourceManager.virtualMachines().deleteById(vm.id()); }
class VirtualMachineEncryptionTests extends ResourceManagerTestBase { private AzureResourceManager azureResourceManager; private String rgName = ""; private final String vmName = "javavm"; private final Region region = Region.US_EAST; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); ResourceManagerUtils.InternalRuntimeContext internalContext = new ResourceManagerUtils.InternalRuntimeContext(); internalContext.setIdentifierFunction(name -> new TestIdentifierProvider(testResourceNamer)); azureResourceManager = buildManager(AzureResourceManager.class, httpPipeline, profile); setInternalContext(internalContext, azureResourceManager); rgName = generateRandomResourceName("javacsmrg", 15); } @Override protected void cleanUpResources() { try { azureResourceManager.resourceGroups().beginDeleteByName(rgName); } catch (Exception e) { } } @Test @DoNotRecord(skipInPlayback = true) }
class VirtualMachineEncryptionTests extends ResourceManagerTestBase { private AzureResourceManager azureResourceManager; private String rgName = ""; private final String vmName = "javavm"; private final Region region = Region.US_EAST; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); ResourceManagerUtils.InternalRuntimeContext internalContext = new ResourceManagerUtils.InternalRuntimeContext(); internalContext.setIdentifierFunction(name -> new TestIdentifierProvider(testResourceNamer)); azureResourceManager = buildManager(AzureResourceManager.class, httpPipeline, profile); setInternalContext(internalContext, azureResourceManager); rgName = generateRandomResourceName("javacsmrg", 15); } @Override protected void cleanUpResources() { try { azureResourceManager.resourceGroups().beginDeleteByName(rgName); } catch (Exception e) { } } @Test @DoNotRecord(skipInPlayback = true) }
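One plausible answer to the reviewer's question: `withRoleBasedAccessControl()` puts the vault under Azure RBAC, where creating the resource grants management-plane rights only; data-plane calls such as `vault.keys().define(...)` are authorized by Key Vault data-plane roles, so the test still needs the explicit assignment. A sketch of that step with explanatory comments (the wrapper class, method name, and import paths are assumptions; the fluent calls mirror the record above):

```java
import com.azure.resourcemanager.AzureResourceManager;
import com.azure.resourcemanager.authorization.models.BuiltInRole;
import com.azure.resourcemanager.keyvault.models.Vault;

import java.util.UUID;

class KeyVaultRbacNotes {
    // Sketch only: under Azure RBAC, creating a vault grants management-plane
    // rights on the resource, but data-plane operations (creating keys, etc.)
    // are gated by Key Vault data-plane roles, hence this assignment.
    static void grantDataPlaneAccess(AzureResourceManager azure, Vault vault, String clientId) {
        azure.accessManagement().roleAssignments()
            .define(UUID.randomUUID().toString())                 // assignment name must be a GUID
            .forServicePrincipal(clientId)                        // the principal running the test
            .withBuiltInRole(BuiltInRole.KEY_VAULT_ADMINISTRATOR) // a data-plane role
            .withResourceScope(vault)                             // scoped to this vault only
            .create();
    }
}
```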
nit: Since this is a really simple `String.format`, we could drop it and just use string concatenation, saving the performance hit of `String.format`.
static List<FormSelectionMark> getReadResultFormSelectionMarks(ReadResult readResultItem, int pageNumber) { return readResultItem.getSelectionMarks().stream() .map(selectionMark -> { final FormSelectionMark formSelectionMark = new FormSelectionMark( null, toBoundingBox(selectionMark.getBoundingBox()), pageNumber); final com.azure.ai.formrecognizer.implementation.models.SelectionMarkState selectionMarkStateImpl = selectionMark.getState(); com.azure.ai.formrecognizer.models.SelectionMarkState selectionMarkState; if (SelectionMarkState.SELECTED.toString().equals(selectionMarkStateImpl.toString())) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.SELECTED; } else if (SelectionMarkState.UNSELECTED.toString().equals(selectionMarkStateImpl.toString())) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.UNSELECTED; } else { throw LOGGER.logThrowableAsError(new RuntimeException( String.format("%s, unsupported selection mark state.", selectionMarkStateImpl))); } FormSelectionMarkHelper.setConfidence(formSelectionMark, selectionMark.getConfidence()); FormSelectionMarkHelper.setState(formSelectionMark, selectionMarkState); return formSelectionMark; }) .collect(Collectors.toList()); }
String.format("%s, unsupported selection mark state.", selectionMarkStateImpl)));
static List<FormSelectionMark> getReadResultFormSelectionMarks(ReadResult readResultItem, int pageNumber) { return readResultItem.getSelectionMarks().stream() .map(selectionMark -> { final FormSelectionMark formSelectionMark = new FormSelectionMark( null, toBoundingBox(selectionMark.getBoundingBox()), pageNumber); final com.azure.ai.formrecognizer.implementation.models.SelectionMarkState selectionMarkStateImpl = selectionMark.getState(); com.azure.ai.formrecognizer.models.SelectionMarkState selectionMarkState; if (SelectionMarkState.SELECTED.toString().equals(selectionMarkStateImpl.toString())) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.SELECTED; } else if (SelectionMarkState.UNSELECTED.toString().equals(selectionMarkStateImpl.toString())) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.UNSELECTED; } else { throw LOGGER.logThrowableAsError(new RuntimeException( selectionMarkStateImpl + ", unsupported selection mark state.")); } FormSelectionMarkHelper.setConfidence(formSelectionMark, selectionMark.getConfidence()); FormSelectionMarkHelper.setState(formSelectionMark, selectionMarkState); return formSelectionMark; }) .collect(Collectors.toList()); }
class Transforms { private static final ClientLogger LOGGER = new ClientLogger(Transforms.class); private static final String WORD_REGEX = "/readResults/(\\d+)/lines/(\\d+)/words/(\\d+)"; private static final String LINE_REGEX = "/readResults/(\\d+)/lines/(\\d+)"; private static final String SELECTION_MARK_REGEX = "/readResults/(\\d+)/selectionMarks/(\\d+)"; private static final float DEFAULT_CONFIDENCE_VALUE = 1.0f; private static final int DEFAULT_TABLE_SPAN = 1; private Transforms() { } /** * Helper method to transform the service returned {@link AnalyzeResult} to SDK model {@link RecognizedForm}. * * @param analyzeResult The service returned result for analyze custom forms. * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @param modelId the unlabeled model Id used for recognition. * @return The List of {@code RecognizedForm}. */ static List<RecognizedForm> toRecognizedForm(AnalyzeResult analyzeResult, boolean includeFieldElements, String modelId) { List<ReadResult> readResults = analyzeResult.getReadResults(); List<DocumentResult> documentResults = analyzeResult.getDocumentResults(); List<PageResult> pageResults = analyzeResult.getPageResults(); List<RecognizedForm> extractedFormList; List<FormPage> formPages = toRecognizedLayout(analyzeResult, includeFieldElements); if (!CoreUtils.isNullOrEmpty(documentResults)) { extractedFormList = new ArrayList<>(); for (DocumentResult documentResultItem : documentResults) { FormPageRange formPageRange; List<Integer> documentPageRange = documentResultItem.getPageRange(); if (documentPageRange.size() == 2) { formPageRange = new FormPageRange(documentPageRange.get(0), documentPageRange.get(1)); } else { formPageRange = new FormPageRange(1, 1); } Map<String, FormField> extractedFieldMap = getLabeledFieldMap(documentResultItem, readResults); final RecognizedForm recognizedForm = new RecognizedForm( extractedFieldMap, documentResultItem.getDocType(), formPageRange, formPages.subList(formPageRange.getFirstPageNumber() - 1, formPageRange.getLastPageNumber())); RecognizedFormHelper.setFormTypeConfidence(recognizedForm, documentResultItem.getDocTypeConfidence()); if (documentResultItem.getModelId() != null) { RecognizedFormHelper.setModelId(recognizedForm, documentResultItem.getModelId().toString()); } extractedFormList.add(recognizedForm); } } else { extractedFormList = new ArrayList<>(); if (!CoreUtils.isNullOrEmpty(pageResults)) { forEachWithIndex(pageResults, ((index, pageResultItem) -> { StringBuilder formType = new StringBuilder("form-"); int pageNumber = pageResultItem.getPage(); Integer clusterId = pageResultItem.getClusterId(); if (clusterId != null) { formType.append(clusterId); } Map<String, FormField> extractedFieldMap = getUnlabeledFieldMap(includeFieldElements, readResults, pageResultItem, pageNumber); final RecognizedForm recognizedForm = new RecognizedForm( extractedFieldMap, formType.toString(), new FormPageRange(pageNumber, pageNumber), Collections.singletonList(formPages.get(index))); RecognizedFormHelper.setModelId(recognizedForm, modelId); extractedFormList.add(recognizedForm); })); } } return extractedFormList; } /** * Helper method to transform the service returned {@link AnalyzeResult} to SDK model {@link FormPage}. * * @param analyzeResult The service returned result for analyze layouts. * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @return The List of {@code FormPage}. 
*/ static List<FormPage> toRecognizedLayout(AnalyzeResult analyzeResult, boolean includeFieldElements) { List<ReadResult> readResults = analyzeResult.getReadResults(); List<PageResult> pageResults = analyzeResult.getPageResults(); List<FormPage> formPages = new ArrayList<>(); boolean pageResultsIsNullOrEmpty = CoreUtils.isNullOrEmpty(pageResults); forEachWithIndex(readResults, ((index, readResultItem) -> { List<FormTable> perPageTableList = new ArrayList<>(); if (!pageResultsIsNullOrEmpty) { PageResult pageResultItem = pageResults.get(index); if (pageResultItem != null) { perPageTableList = getPageTables(pageResultItem, readResults, pageResultItem.getPage()); } } List<FormLine> perPageFormLineList = new ArrayList<>(); if (includeFieldElements && !CoreUtils.isNullOrEmpty(readResultItem.getLines())) { perPageFormLineList = getReadResultFormLines(readResultItem); } List<FormSelectionMark> perPageFormSelectionMarkList = new ArrayList<>(); if (includeFieldElements && !CoreUtils.isNullOrEmpty(readResultItem.getSelectionMarks())) { PageResult pageResultItem = pageResults.get(index); perPageFormSelectionMarkList = getReadResultFormSelectionMarks(readResultItem, pageResultItem.getPage()); } formPages.add(getFormPage(readResultItem, perPageTableList, perPageFormLineList, perPageFormSelectionMarkList)); })); return formPages; } /** * Helper method to convert the per page {@link ReadResult} item to {@link FormSelectionMark}. * * @param readResultItem The per page text extraction item result returned by the service. * @param pageNumber The page number. * @return A list of {@code FormSelectionMark}. */ /** * Helper method to get per-page table information. * * @param pageResultItem The extracted page level information returned by the service. * @param readResults The text extraction result returned by the service. * @param pageNumber The 1 based page number on which these fields exist. * @return The list of per page {@code FormTable}. */ static List<FormTable> getPageTables(PageResult pageResultItem, List<ReadResult> readResults, int pageNumber) { if (pageResultItem.getTables() == null) { return new ArrayList<>(); } else { return pageResultItem.getTables().stream() .map(dataTable -> { FormTable formTable = new FormTable(dataTable.getRows(), dataTable.getColumns(), dataTable.getCells() .stream() .map(dataTableCell -> new FormTableCell( dataTableCell.getRowIndex(), dataTableCell.getColumnIndex(), dataTableCell.getRowSpan() == null ? DEFAULT_TABLE_SPAN : dataTableCell.getRowSpan(), dataTableCell.getColumnSpan() == null ? DEFAULT_TABLE_SPAN : dataTableCell.getColumnSpan(), dataTableCell.getText(), toBoundingBox(dataTableCell.getBoundingBox()), dataTableCell.getConfidence(), dataTableCell.isHeader() != null && dataTableCell.isHeader(), dataTableCell.isFooter() != null && dataTableCell.isFooter(), pageNumber, setReferenceElements(dataTableCell.getElements(), readResults))) .collect(Collectors.toList()), pageNumber); FormTableHelper.setBoundingBox(formTable, toBoundingBox(dataTable.getBoundingBox())); return formTable; }) .collect(Collectors.toList()); } } /** * Helper method to convert the per page {@link ReadResult} item to {@link FormLine}. * * @param readResultItem The per page text extraction item result returned by the service. * @return The list of {@code FormLine}. 
*/ static List<FormLine> getReadResultFormLines(ReadResult readResultItem) { return readResultItem.getLines().stream() .map(textLine -> { FormLine formLine = new FormLine( textLine.getText(), toBoundingBox(textLine.getBoundingBox()), readResultItem.getPage(), toWords(textLine.getWords(), readResultItem.getPage())); FormLineHelper.setAppearance(formLine, getTextAppearance(textLine)); return formLine; }) .collect(Collectors.toList()); } /** * Private method to get the appearance from the service side text line object. * * @param textLine The service side text line object. * @return the custom type TextAppearance model. */ private static TextAppearance getTextAppearance(TextLine textLine) { TextAppearance textAppearance = new TextAppearance(); if (textLine.getAppearance() != null && textLine.getAppearance().getStyle() != null) { if (textLine.getAppearance().getStyle().getName() != null) { TextAppearanceHelper.setStyleName(textAppearance, TextStyleName.fromString(textLine.getAppearance().getStyle().getName().toString())); } TextAppearanceHelper.setStyleConfidence(textAppearance, textLine.getAppearance().getStyle().getConfidence()); } else { return null; } return textAppearance; } /** * The field map returned on analyze with an unlabeled model id. * * @param documentResultItem The extracted document level information. * @param readResults The text extraction result returned by the service. * @return The {@link RecognizedForm */ private static Map<String, FormField> getLabeledFieldMap(DocumentResult documentResultItem, List<ReadResult> readResults) { Map<String, FormField> recognizedFieldMap = new LinkedHashMap<>(); if (!CoreUtils.isNullOrEmpty(documentResultItem.getFields())) { documentResultItem.getFields().forEach((key, fieldValue) -> { if (fieldValue != null) { List<FormElement> formElementList = setReferenceElements(fieldValue.getElements(), readResults); FieldData valueData; if (fieldValue.getPage() == null && CoreUtils.isNullOrEmpty(fieldValue.getBoundingBox())) { valueData = null; } else { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), formElementList); } recognizedFieldMap.put(key, setFormField(key, valueData, fieldValue, readResults)); } else { recognizedFieldMap.put(key, new FormField(key, null, null, null, DEFAULT_CONFIDENCE_VALUE)); } }); } return recognizedFieldMap; } /** * Helper method that converts the incoming service field value to one of the strongly typed SDK level * {@link FormField} with reference elements set when {@code includeFieldElements} is set to true. * * @param name The name of the field. * @param valueData The value text of the field. * @param fieldValue The named field values returned by the service. * @param readResults The text extraction result returned by the service. * @return The strongly typed {@link FormField} for the field input. 
*/ private static FormField setFormField(String name, FieldData valueData, FieldValue fieldValue, List<ReadResult> readResults) { com.azure.ai.formrecognizer.models.FieldValue value; switch (fieldValue.getType()) { case PHONE_NUMBER: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValuePhoneNumber(), FieldValueType.PHONE_NUMBER); break; case STRING: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueString(), FieldValueType.STRING); break; case TIME: if (fieldValue.getValueTime() != null) { LocalTime fieldTime = LocalTime.parse(fieldValue.getValueTime(), DateTimeFormatter.ofPattern("HH:mm:ss")); value = new com.azure.ai.formrecognizer.models.FieldValue(fieldTime, FieldValueType.TIME); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.TIME); } break; case DATE: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueDate(), FieldValueType.DATE); break; case INTEGER: if (fieldValue.getValueInteger() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueInteger().longValue(), FieldValueType.LONG); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.LONG); } break; case NUMBER: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueNumber(), FieldValueType.FLOAT); break; case ARRAY: if (fieldValue.getValueArray() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue( toFieldValueArray(fieldValue.getValueArray(), readResults), FieldValueType.LIST); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.LIST); } break; case OBJECT: if (fieldValue.getValueObject() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue( toFieldValueObject(fieldValue.getValueObject(), readResults), FieldValueType.MAP); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.MAP); } break; case SELECTION_MARK: if (fieldValue.getValueSelectionMark() != null) { com.azure.ai.formrecognizer.models.SelectionMarkState selectionMarkState; final FieldValueSelectionMark fieldValueSelectionMarkState = fieldValue.getValueSelectionMark(); if (FieldValueSelectionMark.SELECTED.equals(fieldValueSelectionMarkState)) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.SELECTED; } else if (FieldValueSelectionMark.UNSELECTED.equals(fieldValueSelectionMarkState)) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.UNSELECTED; } else { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.fromString( fieldValue.getValueSelectionMark().toString()); } value = new com.azure.ai.formrecognizer.models.FieldValue(selectionMarkState, FieldValueType.SELECTION_MARK_STATE); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.SELECTION_MARK_STATE); } break; case COUNTRY_REGION: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueCountryRegion(), FieldValueType.COUNTRY_REGION); break; default: throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported")); } return new FormField(name, null, valueData, value, setDefaultConfidenceValue(fieldValue.getConfidence())); } /** * Helper method to set default confidence value if confidence returned by service is null. * * @param confidence the confidence returned by service. * @return the field confidence value. 
*/ private static float setDefaultConfidenceValue(Float confidence) { return confidence == null ? DEFAULT_CONFIDENCE_VALUE : confidence; } /** * Helper method to convert the service returned * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue * to a SDK level map of {@link FormField}. * * @param valueObject The array of field values returned by the service in {@link FieldValue * @return The Map of {@link FormField}. */ private static Map<String, FormField> toFieldValueObject(Map<String, FieldValue> valueObject, List<ReadResult> readResults) { Map<String, FormField> fieldValueObjectMap = new TreeMap<>(); valueObject.forEach((key, fieldValue) -> { FieldData valueData = null; if (fieldValue.getPage() != null && fieldValue.getBoundingBox() != null) { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), setReferenceElements(fieldValue.getElements(), readResults)); } fieldValueObjectMap.put(key, setFormField(key, valueData, fieldValue, readResults)); }); return fieldValueObjectMap; } /** * Helper method to convert the service returned * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue * to a SDK level List of {@link FormField}. * * @param valueArray The array of field values returned by the service in {@link FieldValue * @param readResults The text extraction result returned by the service. * @return The List of {@link FormField}. */ private static List<FormField> toFieldValueArray(List<FieldValue> valueArray, List<ReadResult> readResults) { return valueArray.stream() .map(fieldValue -> { FieldData valueData = null; if (ARRAY != fieldValue.getType() && (fieldValue.getPage() != null && fieldValue.getBoundingBox() != null)) { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), setReferenceElements(fieldValue.getElements(), readResults)); } return setFormField(null, valueData, fieldValue, readResults); }) .collect(Collectors.toList()); } /** * Helper method to convert the page results to {@code FormPage form pages}. * * @param readResultItem The per page text extraction item result returned by the service. * @param perPageTableList The per page tables list. * @param perPageLineList The per page form lines. * @param perPageSelectionMarkList The per page selection marks. * @return The per page {@code FormPage}. */ private static FormPage getFormPage(ReadResult readResultItem, List<FormTable> perPageTableList, List<FormLine> perPageLineList, List<FormSelectionMark> perPageSelectionMarkList) { FormPage formPage = new FormPage( readResultItem.getHeight(), readResultItem.getAngle(), LengthUnit.fromString(readResultItem.getUnit().toString()), readResultItem.getWidth(), perPageLineList, perPageTableList, readResultItem.getPage()); FormPageHelper.setSelectionMarks(formPage, perPageSelectionMarkList); return formPage; } /** * Helper method to set the {@link RecognizedForm * service. * * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @param readResults The text extraction result returned by the service. * @param pageResultItem The extracted page level information returned by the service. * @param pageNumber The 1 based page number on which these fields exist. 
* @return The fields populated on {@link RecognizedForm */ private static Map<String, FormField> getUnlabeledFieldMap(boolean includeFieldElements, List<ReadResult> readResults, PageResult pageResultItem, int pageNumber) { Map<String, FormField> formFieldMap = new LinkedHashMap<>(); List<KeyValuePair> keyValuePairs = pageResultItem.getKeyValuePairs(); forEachWithIndex(keyValuePairs, ((index, keyValuePair) -> { List<FormElement> formKeyContentList = new ArrayList<>(); List<FormElement> formValueContentList = new ArrayList<>(); if (includeFieldElements) { formKeyContentList = setReferenceElements(keyValuePair.getKey().getElements(), readResults); formValueContentList = setReferenceElements(keyValuePair.getValue().getElements(), readResults ); } FieldData labelData = new FieldData(keyValuePair.getKey().getText(), toBoundingBox(keyValuePair.getKey().getBoundingBox()), pageNumber, formKeyContentList); FieldData valueData = new FieldData(keyValuePair.getValue().getText(), toBoundingBox(keyValuePair.getValue().getBoundingBox()), pageNumber, formValueContentList); String fieldName = "field-" + index; FormField formField = new FormField(fieldName, labelData, valueData, new com.azure.ai.formrecognizer.models.FieldValue(keyValuePair.getValue().getText(), FieldValueType.STRING), setDefaultConfidenceValue(keyValuePair.getConfidence()) ); formFieldMap.put(fieldName, formField); })); return formFieldMap; } /** * Helper method to set the text reference elements on FieldValue/fields when {@code includeFieldElements} set to * true. * * @return The list if referenced elements. */ private static List<FormElement> setReferenceElements(List<String> elements, List<ReadResult> readResults) { if (CoreUtils.isNullOrEmpty(elements)) { return new ArrayList<>(); } List<FormElement> formElementList = new ArrayList<>(); elements.forEach(elementString -> { Matcher wordMatcher = Pattern.compile(WORD_REGEX).matcher(elementString); Matcher lineMatcher = Pattern.compile(LINE_REGEX).matcher(elementString); Matcher selectionMarkMatcher = Pattern.compile(SELECTION_MARK_REGEX).matcher(elementString); if (wordMatcher.find() && wordMatcher.groupCount() == 3) { int pageIndex = Integer.parseInt(wordMatcher.group(1)); int lineIndex = Integer.parseInt(wordMatcher.group(2)); int wordIndex = Integer.parseInt(wordMatcher.group(3)); TextWord textWord = readResults.get(pageIndex).getLines().get(lineIndex).getWords().get(wordIndex); FormWord wordElement = new FormWord(textWord.getText(), toBoundingBox(textWord.getBoundingBox()), pageIndex + 1, setDefaultConfidenceValue(textWord.getConfidence())); formElementList.add(wordElement); } else if (lineMatcher.find() && lineMatcher.groupCount() == 2) { int pageIndex = Integer.parseInt(lineMatcher.group(1)); int lineIndex = Integer.parseInt(lineMatcher.group(2)); TextLine textLine = readResults.get(pageIndex).getLines().get(lineIndex); FormLine lineElement = new FormLine(textLine.getText(), toBoundingBox(textLine.getBoundingBox()), pageIndex + 1, toWords(textLine.getWords(), pageIndex + 1)); FormLineHelper.setAppearance(lineElement, getTextAppearance(textLine)); formElementList.add(lineElement); } else if (selectionMarkMatcher.find() && selectionMarkMatcher.groupCount() == 2) { int pageIndex = Integer.parseInt(selectionMarkMatcher.group(1)); int selectionMarkIndex = Integer.parseInt(selectionMarkMatcher.group(2)); SelectionMark selectionMark = readResults.get(pageIndex).getSelectionMarks().get(selectionMarkIndex); FormSelectionMark selectionMarkElement = new FormSelectionMark(null, 
toBoundingBox(selectionMark.getBoundingBox()), pageIndex + 1); FormSelectionMarkHelper.setState(selectionMarkElement, SelectionMarkState.fromString(selectionMark.getState().toString())); formElementList.add(selectionMarkElement); } else { throw LOGGER.logExceptionAsError(new RuntimeException("Cannot find corresponding reference elements " + "for the field value.")); } }); return formElementList; } /** * Helper method to convert the service level {@link TextWord} to list of SDK level model {@link FormWord}. * * @param words A list of word reference elements returned by the service. * @param pageNumber The 1 based page number on which this word element exists. * @return The list of {@code FormWord words}. */ private static List<FormWord> toWords(List<TextWord> words, int pageNumber) { return words.stream() .map(textWord -> new FormWord( textWord.getText(), toBoundingBox(textWord.getBoundingBox()), pageNumber, setDefaultConfidenceValue(textWord.getConfidence())) ).collect(Collectors.toList()); } /** * Helper method to convert the service level modeled eight numbers representing the four points to SDK level * {@link FieldBoundingBox}. * * @param serviceBoundingBox A list of eight numbers representing the four points of a box. * @return A {@link FieldBoundingBox}. */ private static FieldBoundingBox toBoundingBox(List<Float> serviceBoundingBox) { if (CoreUtils.isNullOrEmpty(serviceBoundingBox) || (serviceBoundingBox.size() % 2) != 0) { return null; } List<Point> pointList = new ArrayList<>(); for (int i = 0; i < serviceBoundingBox.size(); i++) { pointList.add(new Point(serviceBoundingBox.get(i), serviceBoundingBox.get(++i))); } return new FieldBoundingBox(pointList); } }
class Transforms { private static final ClientLogger LOGGER = new ClientLogger(Transforms.class); private static final String WORD_REGEX = "/readResults/(\\d+)/lines/(\\d+)/words/(\\d+)"; private static final String LINE_REGEX = "/readResults/(\\d+)/lines/(\\d+)"; private static final String SELECTION_MARK_REGEX = "/readResults/(\\d+)/selectionMarks/(\\d+)"; private static final Pattern WORD_PATTERN = Pattern.compile(WORD_REGEX); private static final Pattern LINE_PATTERN = Pattern.compile(LINE_REGEX); private static final Pattern SELECTION_MARK_PATTERN = Pattern.compile(SELECTION_MARK_REGEX); private static final float DEFAULT_CONFIDENCE_VALUE = 1.0f; private static final int DEFAULT_TABLE_SPAN = 1; private Transforms() { } /** * Helper method to transform the service returned {@link AnalyzeResult} to SDK model {@link RecognizedForm}. * * @param analyzeResult The service returned result for analyze custom forms. * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @param modelId the unlabeled model Id used for recognition. * @return The List of {@code RecognizedForm}. */ static List<RecognizedForm> toRecognizedForm(AnalyzeResult analyzeResult, boolean includeFieldElements, String modelId) { List<ReadResult> readResults = analyzeResult.getReadResults(); List<DocumentResult> documentResults = analyzeResult.getDocumentResults(); List<PageResult> pageResults = analyzeResult.getPageResults(); List<RecognizedForm> extractedFormList; List<FormPage> formPages = toRecognizedLayout(analyzeResult, includeFieldElements); if (!CoreUtils.isNullOrEmpty(documentResults)) { extractedFormList = new ArrayList<>(); for (DocumentResult documentResultItem : documentResults) { FormPageRange formPageRange; List<Integer> documentPageRange = documentResultItem.getPageRange(); if (documentPageRange.size() == 2) { formPageRange = new FormPageRange(documentPageRange.get(0), documentPageRange.get(1)); } else { formPageRange = new FormPageRange(1, 1); } Map<String, FormField> extractedFieldMap = getLabeledFieldMap(documentResultItem, readResults); final RecognizedForm recognizedForm = new RecognizedForm( extractedFieldMap, documentResultItem.getDocType(), formPageRange, formPages.subList(formPageRange.getFirstPageNumber() - 1, formPageRange.getLastPageNumber())); RecognizedFormHelper.setFormTypeConfidence(recognizedForm, documentResultItem.getDocTypeConfidence()); if (documentResultItem.getModelId() != null) { RecognizedFormHelper.setModelId(recognizedForm, documentResultItem.getModelId().toString()); } extractedFormList.add(recognizedForm); } } else { extractedFormList = new ArrayList<>(); if (!CoreUtils.isNullOrEmpty(pageResults)) { forEachWithIndex(pageResults, ((index, pageResultItem) -> { StringBuilder formType = new StringBuilder("form-"); int pageNumber = pageResultItem.getPage(); Integer clusterId = pageResultItem.getClusterId(); if (clusterId != null) { formType.append(clusterId); } Map<String, FormField> extractedFieldMap = getUnlabeledFieldMap(includeFieldElements, readResults, pageResultItem, pageNumber); final RecognizedForm recognizedForm = new RecognizedForm( extractedFieldMap, formType.toString(), new FormPageRange(pageNumber, pageNumber), Collections.singletonList(formPages.get(index))); RecognizedFormHelper.setModelId(recognizedForm, modelId); extractedFormList.add(recognizedForm); })); } } return extractedFormList; } /** * Helper method to transform the service returned {@link AnalyzeResult} to SDK model {@link FormPage}. 
* * @param analyzeResult The service returned result for analyze layouts. * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @return The List of {@code FormPage}. */ static List<FormPage> toRecognizedLayout(AnalyzeResult analyzeResult, boolean includeFieldElements) { List<ReadResult> readResults = analyzeResult.getReadResults(); List<PageResult> pageResults = analyzeResult.getPageResults(); List<FormPage> formPages = new ArrayList<>(); boolean pageResultsIsNullOrEmpty = CoreUtils.isNullOrEmpty(pageResults); forEachWithIndex(readResults, ((index, readResultItem) -> { List<FormTable> perPageTableList = new ArrayList<>(); if (!pageResultsIsNullOrEmpty) { PageResult pageResultItem = pageResults.get(index); if (pageResultItem != null) { perPageTableList = getPageTables(pageResultItem, readResults, pageResultItem.getPage()); } } List<FormLine> perPageFormLineList = new ArrayList<>(); if (includeFieldElements && !CoreUtils.isNullOrEmpty(readResultItem.getLines())) { perPageFormLineList = getReadResultFormLines(readResultItem); } List<FormSelectionMark> perPageFormSelectionMarkList = new ArrayList<>(); if (includeFieldElements && !CoreUtils.isNullOrEmpty(readResultItem.getSelectionMarks())) { PageResult pageResultItem = pageResults.get(index); perPageFormSelectionMarkList = getReadResultFormSelectionMarks(readResultItem, pageResultItem.getPage()); } formPages.add(getFormPage(readResultItem, perPageTableList, perPageFormLineList, perPageFormSelectionMarkList)); })); return formPages; } /** * Helper method to convert the per page {@link ReadResult} item to {@link FormSelectionMark}. * * @param readResultItem The per page text extraction item result returned by the service. * @param pageNumber The page number. * @return A list of {@code FormSelectionMark}. */ /** * Helper method to get per-page table information. * * @param pageResultItem The extracted page level information returned by the service. * @param readResults The text extraction result returned by the service. * @param pageNumber The 1 based page number on which these fields exist. * @return The list of per page {@code FormTable}. */ static List<FormTable> getPageTables(PageResult pageResultItem, List<ReadResult> readResults, int pageNumber) { if (pageResultItem.getTables() == null) { return new ArrayList<>(); } else { return pageResultItem.getTables().stream() .map(dataTable -> { FormTable formTable = new FormTable(dataTable.getRows(), dataTable.getColumns(), dataTable.getCells() .stream() .map(dataTableCell -> new FormTableCell( dataTableCell.getRowIndex(), dataTableCell.getColumnIndex(), dataTableCell.getRowSpan() == null ? DEFAULT_TABLE_SPAN : dataTableCell.getRowSpan(), dataTableCell.getColumnSpan() == null ? DEFAULT_TABLE_SPAN : dataTableCell.getColumnSpan(), dataTableCell.getText(), toBoundingBox(dataTableCell.getBoundingBox()), dataTableCell.getConfidence(), dataTableCell.isHeader() != null && dataTableCell.isHeader(), dataTableCell.isFooter() != null && dataTableCell.isFooter(), pageNumber, setReferenceElements(dataTableCell.getElements(), readResults))) .collect(Collectors.toList()), pageNumber); FormTableHelper.setBoundingBox(formTable, toBoundingBox(dataTable.getBoundingBox())); return formTable; }) .collect(Collectors.toList()); } } /** * Helper method to convert the per page {@link ReadResult} item to {@link FormLine}. * * @param readResultItem The per page text extraction item result returned by the service. * @return The list of {@code FormLine}. 
*/ static List<FormLine> getReadResultFormLines(ReadResult readResultItem) { return readResultItem.getLines().stream() .map(textLine -> { FormLine formLine = new FormLine( textLine.getText(), toBoundingBox(textLine.getBoundingBox()), readResultItem.getPage(), toWords(textLine.getWords(), readResultItem.getPage())); FormLineHelper.setAppearance(formLine, getTextAppearance(textLine)); return formLine; }) .collect(Collectors.toList()); } /** * Private method to get the appearance from the service side text line object. * * @param textLine The service side text line object. * @return the custom type TextAppearance model. */ private static TextAppearance getTextAppearance(TextLine textLine) { TextAppearance textAppearance = new TextAppearance(); if (textLine.getAppearance() != null && textLine.getAppearance().getStyle() != null) { if (textLine.getAppearance().getStyle().getName() != null) { TextAppearanceHelper.setStyleName(textAppearance, TextStyleName.fromString(textLine.getAppearance().getStyle().getName().toString())); } TextAppearanceHelper.setStyleConfidence(textAppearance, textLine.getAppearance().getStyle().getConfidence()); } else { return null; } return textAppearance; } /** * The field map returned on analyze with an unlabeled model id. * * @param documentResultItem The extracted document level information. * @param readResults The text extraction result returned by the service. * @return The {@link RecognizedForm */ private static Map<String, FormField> getLabeledFieldMap(DocumentResult documentResultItem, List<ReadResult> readResults) { Map<String, FormField> recognizedFieldMap = new LinkedHashMap<>(); if (!CoreUtils.isNullOrEmpty(documentResultItem.getFields())) { documentResultItem.getFields().forEach((key, fieldValue) -> { if (fieldValue != null) { List<FormElement> formElementList = setReferenceElements(fieldValue.getElements(), readResults); FieldData valueData; if (fieldValue.getPage() == null && CoreUtils.isNullOrEmpty(fieldValue.getBoundingBox())) { valueData = null; } else { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), formElementList); } recognizedFieldMap.put(key, setFormField(key, valueData, fieldValue, readResults)); } else { recognizedFieldMap.put(key, new FormField(key, null, null, null, DEFAULT_CONFIDENCE_VALUE)); } }); } return recognizedFieldMap; } /** * Helper method that converts the incoming service field value to one of the strongly typed SDK level * {@link FormField} with reference elements set when {@code includeFieldElements} is set to true. * * @param name The name of the field. * @param valueData The value text of the field. * @param fieldValue The named field values returned by the service. * @param readResults The text extraction result returned by the service. * @return The strongly typed {@link FormField} for the field input. 
*/ private static FormField setFormField(String name, FieldData valueData, FieldValue fieldValue, List<ReadResult> readResults) { com.azure.ai.formrecognizer.models.FieldValue value; switch (fieldValue.getType()) { case PHONE_NUMBER: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValuePhoneNumber(), FieldValueType.PHONE_NUMBER); break; case STRING: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueString(), FieldValueType.STRING); break; case TIME: if (fieldValue.getValueTime() != null) { LocalTime fieldTime = LocalTime.parse(fieldValue.getValueTime(), DateTimeFormatter.ofPattern("HH:mm:ss")); value = new com.azure.ai.formrecognizer.models.FieldValue(fieldTime, FieldValueType.TIME); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.TIME); } break; case DATE: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueDate(), FieldValueType.DATE); break; case INTEGER: if (fieldValue.getValueInteger() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueInteger().longValue(), FieldValueType.LONG); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.LONG); } break; case NUMBER: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueNumber(), FieldValueType.FLOAT); break; case ARRAY: if (fieldValue.getValueArray() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue( toFieldValueArray(fieldValue.getValueArray(), readResults), FieldValueType.LIST); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.LIST); } break; case OBJECT: if (fieldValue.getValueObject() != null) { value = new com.azure.ai.formrecognizer.models.FieldValue( toFieldValueObject(fieldValue.getValueObject(), readResults), FieldValueType.MAP); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.MAP); } break; case SELECTION_MARK: if (fieldValue.getValueSelectionMark() != null) { com.azure.ai.formrecognizer.models.SelectionMarkState selectionMarkState; final FieldValueSelectionMark fieldValueSelectionMarkState = fieldValue.getValueSelectionMark(); if (FieldValueSelectionMark.SELECTED.equals(fieldValueSelectionMarkState)) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.SELECTED; } else if (FieldValueSelectionMark.UNSELECTED.equals(fieldValueSelectionMarkState)) { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.UNSELECTED; } else { selectionMarkState = com.azure.ai.formrecognizer.models.SelectionMarkState.fromString( fieldValue.getValueSelectionMark().toString()); } value = new com.azure.ai.formrecognizer.models.FieldValue(selectionMarkState, FieldValueType.SELECTION_MARK_STATE); } else { value = new com.azure.ai.formrecognizer.models.FieldValue(null, FieldValueType.SELECTION_MARK_STATE); } break; case COUNTRY_REGION: value = new com.azure.ai.formrecognizer.models.FieldValue(fieldValue.getValueCountryRegion(), FieldValueType.COUNTRY_REGION); break; default: throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported")); } return new FormField(name, null, valueData, value, setDefaultConfidenceValue(fieldValue.getConfidence())); } /** * Helper method to set default confidence value if confidence returned by service is null. * * @param confidence the confidence returned by service. * @return the field confidence value. 
*/ private static float setDefaultConfidenceValue(Float confidence) { return confidence == null ? DEFAULT_CONFIDENCE_VALUE : confidence; } /** * Helper method to convert the service returned * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue * to a SDK level map of {@link FormField}. * * @param valueObject The array of field values returned by the service in {@link FieldValue * @return The Map of {@link FormField}. */ private static Map<String, FormField> toFieldValueObject(Map<String, FieldValue> valueObject, List<ReadResult> readResults) { Map<String, FormField> fieldValueObjectMap = new TreeMap<>(); valueObject.forEach((key, fieldValue) -> { FieldData valueData = null; if (fieldValue.getPage() != null && fieldValue.getBoundingBox() != null) { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), setReferenceElements(fieldValue.getElements(), readResults)); } fieldValueObjectMap.put(key, setFormField(key, valueData, fieldValue, readResults)); }); return fieldValueObjectMap; } /** * Helper method to convert the service returned * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue * to a SDK level List of {@link FormField}. * * @param valueArray The array of field values returned by the service in {@link FieldValue * @param readResults The text extraction result returned by the service. * @return The List of {@link FormField}. */ private static List<FormField> toFieldValueArray(List<FieldValue> valueArray, List<ReadResult> readResults) { return valueArray.stream() .map(fieldValue -> { FieldData valueData = null; if (ARRAY != fieldValue.getType() && (fieldValue.getPage() != null && fieldValue.getBoundingBox() != null)) { valueData = new FieldData(fieldValue.getText(), toBoundingBox(fieldValue.getBoundingBox()), fieldValue.getPage(), setReferenceElements(fieldValue.getElements(), readResults)); } return setFormField(null, valueData, fieldValue, readResults); }) .collect(Collectors.toList()); } /** * Helper method to convert the page results to {@code FormPage form pages}. * * @param readResultItem The per page text extraction item result returned by the service. * @param perPageTableList The per page tables list. * @param perPageLineList The per page form lines. * @param perPageSelectionMarkList The per page selection marks. * @return The per page {@code FormPage}. */ private static FormPage getFormPage(ReadResult readResultItem, List<FormTable> perPageTableList, List<FormLine> perPageLineList, List<FormSelectionMark> perPageSelectionMarkList) { FormPage formPage = new FormPage( readResultItem.getHeight(), readResultItem.getAngle(), LengthUnit.fromString(readResultItem.getUnit().toString()), readResultItem.getWidth(), perPageLineList, perPageTableList, readResultItem.getPage()); FormPageHelper.setSelectionMarks(formPage, perPageSelectionMarkList); return formPage; } /** * Helper method to set the {@link RecognizedForm * service. * * @param includeFieldElements Boolean to indicate if to set reference elements data on fields. * @param readResults The text extraction result returned by the service. * @param pageResultItem The extracted page level information returned by the service. * @param pageNumber The 1 based page number on which these fields exist. 
* @return The fields populated on {@link RecognizedForm */ private static Map<String, FormField> getUnlabeledFieldMap(boolean includeFieldElements, List<ReadResult> readResults, PageResult pageResultItem, int pageNumber) { Map<String, FormField> formFieldMap = new LinkedHashMap<>(); List<KeyValuePair> keyValuePairs = pageResultItem.getKeyValuePairs(); forEachWithIndex(keyValuePairs, ((index, keyValuePair) -> { List<FormElement> formKeyContentList = new ArrayList<>(); List<FormElement> formValueContentList = new ArrayList<>(); if (includeFieldElements) { formKeyContentList = setReferenceElements(keyValuePair.getKey().getElements(), readResults); formValueContentList = setReferenceElements(keyValuePair.getValue().getElements(), readResults ); } FieldData labelData = new FieldData(keyValuePair.getKey().getText(), toBoundingBox(keyValuePair.getKey().getBoundingBox()), pageNumber, formKeyContentList); FieldData valueData = new FieldData(keyValuePair.getValue().getText(), toBoundingBox(keyValuePair.getValue().getBoundingBox()), pageNumber, formValueContentList); String fieldName = "field-" + index; FormField formField = new FormField(fieldName, labelData, valueData, new com.azure.ai.formrecognizer.models.FieldValue(keyValuePair.getValue().getText(), FieldValueType.STRING), setDefaultConfidenceValue(keyValuePair.getConfidence()) ); formFieldMap.put(fieldName, formField); })); return formFieldMap; } /** * Helper method to set the text reference elements on FieldValue/fields when {@code includeFieldElements} set to * true. * * @return The list if referenced elements. */ private static List<FormElement> setReferenceElements(List<String> elements, List<ReadResult> readResults) { if (CoreUtils.isNullOrEmpty(elements)) { return new ArrayList<>(); } List<FormElement> formElementList = new ArrayList<>(); elements.forEach(elementString -> { Matcher wordMatcher = WORD_PATTERN.matcher(elementString); if (wordMatcher.find() && wordMatcher.groupCount() == 3) { int pageIndex = Integer.parseInt(wordMatcher.group(1)); int lineIndex = Integer.parseInt(wordMatcher.group(2)); int wordIndex = Integer.parseInt(wordMatcher.group(3)); TextWord textWord = readResults.get(pageIndex).getLines().get(lineIndex).getWords().get(wordIndex); FormWord wordElement = new FormWord(textWord.getText(), toBoundingBox(textWord.getBoundingBox()), pageIndex + 1, setDefaultConfidenceValue(textWord.getConfidence())); formElementList.add(wordElement); } Matcher lineMatcher = LINE_PATTERN.matcher(elementString); if (lineMatcher.find() && lineMatcher.groupCount() == 2) { int pageIndex = Integer.parseInt(lineMatcher.group(1)); int lineIndex = Integer.parseInt(lineMatcher.group(2)); TextLine textLine = readResults.get(pageIndex).getLines().get(lineIndex); FormLine lineElement = new FormLine(textLine.getText(), toBoundingBox(textLine.getBoundingBox()), pageIndex + 1, toWords(textLine.getWords(), pageIndex + 1)); FormLineHelper.setAppearance(lineElement, getTextAppearance(textLine)); formElementList.add(lineElement); } Matcher selectionMarkMatcher = SELECTION_MARK_PATTERN.matcher(elementString); if (selectionMarkMatcher.find() && selectionMarkMatcher.groupCount() == 2) { int pageIndex = Integer.parseInt(selectionMarkMatcher.group(1)); int selectionMarkIndex = Integer.parseInt(selectionMarkMatcher.group(2)); SelectionMark selectionMark = readResults.get(pageIndex).getSelectionMarks().get(selectionMarkIndex); FormSelectionMark selectionMarkElement = new FormSelectionMark(null, toBoundingBox(selectionMark.getBoundingBox()), pageIndex + 1); 
FormSelectionMarkHelper.setState(selectionMarkElement, SelectionMarkState.fromString(selectionMark.getState().toString())); formElementList.add(selectionMarkElement); } }); return formElementList; } /** * Helper method to convert the service level {@link TextWord} to list of SDK level model {@link FormWord}. * * @param words A list of word reference elements returned by the service. * @param pageNumber The 1 based page number on which this word element exists. * @return The list of {@code FormWord words}. */ private static List<FormWord> toWords(List<TextWord> words, int pageNumber) { return words.stream() .map(textWord -> new FormWord( textWord.getText(), toBoundingBox(textWord.getBoundingBox()), pageNumber, setDefaultConfidenceValue(textWord.getConfidence())) ).collect(Collectors.toList()); } /** * Helper method to convert the service level modeled eight numbers representing the four points to SDK level * {@link FieldBoundingBox}. * * @param serviceBoundingBox A list of eight numbers representing the four points of a box. * @return A {@link FieldBoundingBox}. */ private static FieldBoundingBox toBoundingBox(List<Float> serviceBoundingBox) { if (CoreUtils.isNullOrEmpty(serviceBoundingBox) || (serviceBoundingBox.size() % 2) != 0) { return null; } List<Point> pointList = new ArrayList<>(); for (int i = 0; i < serviceBoundingBox.size(); i++) { pointList.add(new Point(serviceBoundingBox.get(i), serviceBoundingBox.get(++i))); } return new FieldBoundingBox(pointList); } }
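A note on the `toBoundingBox` helper at the end of this block: it consumes the service's flat float list two entries at a time (`get(i)`, `get(++i)` inside an `i++` loop), which works but is easy to misread. Below is a minimal, runnable sketch of the same pairing with an explicit `i += 2` step; the `Point` class here is a stand-in for the SDK type, not the real `com.azure.ai.formrecognizer.models.Point`.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Stand-in for the SDK's Point type; hypothetical, for illustration only.
final class Point {
    private final float x;
    private final float y;
    Point(float x, float y) { this.x = x; this.y = y; }
    @Override public String toString() { return "(" + x + ", " + y + ")"; }
}

public final class BoundingBoxSketch {
    // The service returns a flat list [x1, y1, x2, y2, ...]; consecutive
    // floats form one point. An empty or odd-sized list yields null,
    // mirroring the guard in toBoundingBox above.
    static List<Point> toPoints(List<Float> serviceBoundingBox) {
        if (serviceBoundingBox == null || serviceBoundingBox.isEmpty()
            || serviceBoundingBox.size() % 2 != 0) {
            return null;
        }
        List<Point> points = new ArrayList<>();
        for (int i = 0; i < serviceBoundingBox.size(); i += 2) {
            points.add(new Point(serviceBoundingBox.get(i), serviceBoundingBox.get(i + 1)));
        }
        return points;
    }

    public static void main(String[] args) {
        // Four corners of a 2x1 rectangle, flattened by the service.
        System.out.println(toPoints(Arrays.asList(0f, 0f, 2f, 0f, 2f, 1f, 0f, 1f)));
    }
}
```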
Should we enforce static usage instead (after everything is converted)? (See the sketch after this row.)
public void visitToken(DetailAST ast) { switch (ast.getType()) { case TokenTypes.IMPORT: final String importClassPath = FullIdent.createFullIdentBelow(ast).getText(); hasClientLoggerImported = hasClientLoggerImported || importClassPath.equals(CLIENT_LOGGER_PATH); INVALID_LOGS.forEach(item -> { if (importClassPath.startsWith(item)) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "external logger", CLIENT_LOGGER_PATH, item)); } }); break; case TokenTypes.CLASS_DEF: case TokenTypes.INTERFACE_DEF: classNameDeque.offer(ast.findFirstToken(TokenTypes.IDENT).getText()); break; case TokenTypes.LITERAL_NEW: checkLoggerInstantiation(ast); break; case TokenTypes.VARIABLE_DEF: checkLoggerNameMatch(ast); break; case TokenTypes.METHOD_CALL: final DetailAST dotToken = ast.findFirstToken(TokenTypes.DOT); if (dotToken == null) { return; } final String methodCallName = FullIdent.createFullIdentBelow(dotToken).getText(); if (methodCallName.startsWith("System.out") || methodCallName.startsWith("System.err")) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "Java System", CLIENT_LOGGER_PATH, methodCallName)); } break; default: break; } }
break;
public void visitToken(DetailAST ast) { switch (ast.getType()) { case TokenTypes.IMPORT: final String importClassPath = FullIdent.createFullIdentBelow(ast).getText(); hasClientLoggerImported = hasClientLoggerImported || importClassPath.equals(CLIENT_LOGGER_PATH); INVALID_LOGS.forEach(item -> { if (importClassPath.startsWith(item)) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "external logger", CLIENT_LOGGER_PATH, item)); } }); break; case TokenTypes.CLASS_DEF: case TokenTypes.INTERFACE_DEF: classNameDeque.offer(ast.findFirstToken(TokenTypes.IDENT).getText()); break; case TokenTypes.LITERAL_NEW: checkLoggerInstantiation(ast); break; case TokenTypes.VARIABLE_DEF: checkLoggerNameMatch(ast); break; case TokenTypes.METHOD_CALL: final DetailAST dotToken = ast.findFirstToken(TokenTypes.DOT); if (dotToken == null) { return; } final String methodCallName = FullIdent.createFullIdentBelow(dotToken).getText(); if (methodCallName.startsWith("System.out") || methodCallName.startsWith("System.err")) { log(ast, String.format(NOT_CLIENT_LOGGER_ERROR, "Java System", CLIENT_LOGGER_PATH, methodCallName)); } break; default: break; } }
class name AST node private final Queue<String> classNameDeque = Collections.asLifoQueue(new ArrayDeque<>()); private static final Set<String> INVALID_LOGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "org.slf4j", "org.apache.logging.log4j", "java.util.logging" ))); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class name AST node private final Queue<String> classNameDeque = Collections.asLifoQueue(new ArrayDeque<>()); private static final Set<String> INVALID_LOGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "org.slf4j", "org.apache.logging.log4j", "java.util.logging" ))); @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
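On the question in this row's comment (enforcing static `ClientLogger` usage once the conversion is done): a companion check could flag non-static `ClientLogger` fields. A minimal sketch against the same Checkstyle API the check above uses; the class name and message text are hypothetical, and whether loggers should be static is exactly the open question, so this is illustrative rather than a settled rule.

```java
import com.puppycrawl.tools.checkstyle.api.AbstractCheck;
import com.puppycrawl.tools.checkstyle.api.DetailAST;
import com.puppycrawl.tools.checkstyle.api.FullIdent;
import com.puppycrawl.tools.checkstyle.api.TokenTypes;

// Hypothetical follow-up check: flag ClientLogger fields that are not static.
public class StaticClientLoggerCheck extends AbstractCheck {
    private static final String CLIENT_LOGGER = "ClientLogger";

    @Override
    public int[] getDefaultTokens() {
        return getRequiredTokens();
    }

    @Override
    public int[] getAcceptableTokens() {
        return getRequiredTokens();
    }

    @Override
    public int[] getRequiredTokens() {
        return new int[] { TokenTypes.VARIABLE_DEF };
    }

    @Override
    public void visitToken(DetailAST ast) {
        // Only fields: a field's VARIABLE_DEF hangs off an OBJBLOCK.
        if (ast.getParent().getType() != TokenTypes.OBJBLOCK) {
            return;
        }
        final DetailAST type = ast.findFirstToken(TokenTypes.TYPE);
        final String typeName = FullIdent.createFullIdent(type.getFirstChild()).getText();
        // Match the simple or fully qualified type name.
        if (!typeName.equals(CLIENT_LOGGER) && !typeName.endsWith("." + CLIENT_LOGGER)) {
            return;
        }
        final DetailAST modifiers = ast.findFirstToken(TokenTypes.MODIFIERS);
        if (modifiers.findFirstToken(TokenTypes.LITERAL_STATIC) == null) {
            log(ast, String.format("ClientLogger field '%s' should be static.",
                ast.findFirstToken(TokenTypes.IDENT).getText()));
        }
    }
}
```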
wellKnown (typo: `wellKnow` should be `wellKnown`; see the note after this row)
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases()));
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well know certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded keyVault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. * </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) 
{ LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. * </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) 
{ LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
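The terse comment above is a spelling fix: `wellKnowCertificates`, `wellKnowPath`, and the "well know" log text should all read "well known". Separately, the duplicate-skipping merge in `getAllAliases()` is worth spelling out. A runnable sketch of that merge, with plain strings standing in for the certificate sources (all names hypothetical):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Sketch of the merge performed by getAllAliases(): JRE aliases seed the
// list; aliases from the other sources are added only if not already
// present, otherwise the collision is logged and skipped.
public final class AliasMergeSketch {
    public static void main(String[] args) {
        List<String> allAliases = new ArrayList<>(Arrays.asList("jre-root-1", "jre-root-2"));

        Map<String, List<String>> aliasLists = new LinkedHashMap<>();
        aliasLists.put("well known certificates", Arrays.asList("corp-ca", "jre-root-1"));
        aliasLists.put("custom certificates", Arrays.asList("corp-ca", "dev-cert"));

        aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> {
            if (allAliases.contains(alias)) {
                System.out.printf("The certificate %s under %s already exists%n", alias, certificatesType);
            } else {
                allAliases.add(alias);
            }
        }));

        System.out.println(allAliases); // [jre-root-1, jre-root-2, corp-ca, dev-cert]
    }
}
```

The sketch uses a `LinkedHashMap` so the demo is deterministic; the class above uses a `HashMap`, so precedence among the non-JRE sources is left unspecified there.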
Can we use `map().collect(Collectors.toList())` instead? (See the sketch after this row.)
private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessages"))); } if (Objects.isNull(batch)) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; if (batch.getMessages().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.atInfo() .addKeyValue("batchSize", batch.getCount()) .log("Sending batch."); AtomicReference<Context> sharedContext = new AtomicReference<>(Context.NONE); final List<org.apache.qpid.proton.message.Message> messages = Collections.synchronizedList(new ArrayList<>()); batch.getMessages().forEach(serviceBusMessage -> { if (isTracingEnabled) { parentContext.set(serviceBusMessage.getContext()); if (sharedContext.get().equals(Context.NONE)) { sharedContext.set(tracerProvider.getSharedSpanBuilder(SERVICE_BASE_NAME, parentContext.get())); } tracerProvider.addSpanLinks(sharedContext.get().addData(SPAN_CONTEXT_KEY, serviceBusMessage.getContext())); } final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(serviceBusMessage); final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); message.setMessageAnnotations(messageAnnotations); messages.add(message); }); if (isTracingEnabled) { final Context finalSharedContext = sharedContext.get().equals(Context.NONE) ? Context.NONE : sharedContext.get() .addData(ENTITY_PATH_KEY, entityName) .addData(HOST_NAME_KEY, connectionProcessor.getFullyQualifiedNamespace()) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, finalSharedContext, ProcessKind.SEND)); } final Mono<Void> sendMessage = getSendLink().flatMap(link -> { if (transactionContext != null && transactionContext.getTransactionId() != null) { final TransactionalState deliveryState = new TransactionalState(); deliveryState.setTxnId(new Binary(transactionContext.getTransactionId().array())); return messages.size() == 1 ? link.send(messages.get(0), deliveryState) : link.send(messages, deliveryState); } else { return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); } }); return withRetry(sendMessage, retryOptions, String.format("entityPath[%s], partitionId[%s]: Sending messages timed out.", entityName, batch.getCount())) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).onErrorMap(this::mapError); }
batch.getMessages().forEach(serviceBusMessage -> {
private Mono<Void> sendInternal(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessages"))); } if (Objects.isNull(batch)) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; if (batch.getMessages().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.atInfo() .addKeyValue("batchSize", batch.getCount()) .log("Sending batch."); AtomicReference<Context> sharedContext = new AtomicReference<>(Context.NONE); final List<org.apache.qpid.proton.message.Message> messages = Collections.synchronizedList(new ArrayList<>()); batch.getMessages().forEach(serviceBusMessage -> { if (isTracingEnabled) { parentContext.set(serviceBusMessage.getContext()); if (sharedContext.get().equals(Context.NONE)) { sharedContext.set(tracerProvider.getSharedSpanBuilder(SERVICE_BASE_NAME, parentContext.get())); } tracerProvider.addSpanLinks(sharedContext.get().addData(SPAN_CONTEXT_KEY, serviceBusMessage.getContext())); } final org.apache.qpid.proton.message.Message message = messageSerializer.serialize(serviceBusMessage); final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); message.setMessageAnnotations(messageAnnotations); messages.add(message); }); if (isTracingEnabled) { final Context finalSharedContext = sharedContext.get().equals(Context.NONE) ? Context.NONE : sharedContext.get() .addData(ENTITY_PATH_KEY, entityName) .addData(HOST_NAME_KEY, connectionProcessor.getFullyQualifiedNamespace()) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, finalSharedContext, ProcessKind.SEND)); } final Mono<Void> sendMessage = getSendLink().flatMap(link -> { if (transactionContext != null && transactionContext.getTransactionId() != null) { final TransactionalState deliveryState = new TransactionalState(); deliveryState.setTxnId(new Binary(transactionContext.getTransactionId().array())); return messages.size() == 1 ? link.send(messages.get(0), deliveryState) : link.send(messages, deliveryState); } else { return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); } }); return withRetry(sendMessage, retryOptions, String.format("entityPath[%s], partitionId[%s]: Sending messages timed out.", entityName, batch.getCount())) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).onErrorMap(this::mapError); }
class ServiceBusSenderAsyncClient implements AutoCloseable { /** * The default maximum allowable size, in bytes, for a batch to be sent. */ static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final String AZ_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private static final CreateMessageBatchOptions DEFAULT_BATCH_OPTIONS = new CreateMessageBatchOptions(); private static final String SERVICE_BASE_NAME = "ServiceBus."; private final ClientLogger logger = new ClientLogger(ServiceBusSenderAsyncClient.class); private final AtomicReference<String> linkName = new AtomicReference<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final MessagingEntityType entityType; private final Runnable onClientClose; private final String entityName; private final ServiceBusConnectionProcessor connectionProcessor; private final String viaEntityName; /** * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity. */ ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName) { this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = tracerProvider; this.retryPolicy = getRetryPolicy(retryOptions); this.entityType = entityType; this.viaEntityName = viaEntityName; this.onClientClose = onClientClose; } /** * Gets the fully qualified namespace. * * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return connectionProcessor.getFullyQualifiedNamespace(); } /** * Gets the name of the Service Bus resource. * * @return The name of the Service Bus resource. */ public String getEntityPath() { return entityName; } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or * the message could not be sent. */ public Mono<Void> sendMessage(ServiceBusMessage message) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } return sendInternal(Flux.just(message), null); } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return The {@link Mono} the finishes this operation on service bus resource. 
* * @throws NullPointerException if {@code message}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or * the message could not be sent. */ public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(Flux.just(message), transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages * exceed the maximum size of a single batch, an exception will be triggered and the send will fail. * By default, the message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code messages} are larger than the maximum allowed size of a single message or * the message could not be sent. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendIterable(messages, transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages exceed * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the * message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code messages} is {@code null}. * @throws ServiceBusException if {@code messages} are larger than the maximum allowed size of a single message or * the message could not be sent. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) { return sendIterable(messages, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch} is {@code null}. * @throws ServiceBusException if the message batch could not be sent. * @throws IllegalStateException if sender is already disposed. 
*/ public Mono<Void> sendMessages(ServiceBusMessageBatch batch) { return sendInternal(batch, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws ServiceBusException if the message batch could not be sent. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(batch, transactionContext); } /** * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * @throws ServiceBusException if the message batch could not be created. * @throws IllegalStateException if sender is already disposed. */ public Mono<ServiceBusMessageBatch> createMessageBatch() { return createMessageBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link ServiceBusMessageBatch} configured with the options specified. * * @param options A set of options used to configure the {@link ServiceBusMessageBatch}. * * @return A new {@link ServiceBusMessageBatch} configured with the given options. * @throws NullPointerException if {@code options} is null. * @throws ServiceBusException if the message batch could not be created. * @throws IllegalStateException if sender is already disposed. * @throws IllegalArgumentException if {@link CreateMessageBatchOptions * maximum allowed size. */ public Mono<ServiceBusMessageBatch> createMessageBatch(CreateMessageBatchOptions options) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "createMessageBatch"))); } if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final int maxSize = options.getMaximumSizeInBytes(); return getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (maxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateMessageBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size" + " (%s bytes).", maxSize, maximumLinkSize))); } final int batchSize = maxSize > 0 ? maxSize : maximumLinkSize; return Mono.just( new ServiceBusMessageBatch(batchSize, link::getErrorContext, tracerProvider, messageSerializer, entityName, getFullyQualifiedNamespace())); })).onErrorMap(this::mapError); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. 
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * @param transactionContext to be set on message before sending to Service Bus. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. * @throws ServiceBusException If the message could not be scheduled. * @throws IllegalStateException if sender is already disposed. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}. * @throws ServiceBusException If the message could not be scheduled. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime) { return scheduleMessageInternal(message, scheduledEnqueueTime, null); } /** * Sends a batch of scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled * message is enqueued and made available to receivers only at the scheduled enqueue time. * * @param messages Messages to be sent to the Service Bus queue or topic. * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * * @return Sequence numbers of the scheduled messages which can be used to cancel the messages. * * @throws NullPointerException If {@code messages} or {@code scheduledEnqueueTime} is {@code null}. * @throws ServiceBusException If the messages could not be scheduled. * @throws IllegalStateException if sender is already disposed. */ public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime) { return scheduleMessages(messages, scheduledEnqueueTime, null); } /** * Sends a scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param messages Messages to be sent to the Service Bus Queue. * @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus queue or topic. * @param transactionContext Transaction to associate with the operation. * * @return Sequence numbers of the scheduled messages which can be used to cancel the messages. 
* * @throws NullPointerException If {@code messages}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws ServiceBusException If the messages could not be scheduled. * @throws IllegalStateException if sender is already disposed. */ public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessages"))); } if (Objects.isNull(messages)) { return fluxError(logger, new NullPointerException("'messages' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return fluxError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return createMessageBatch() .map(messageBatch -> { int index = 0; for (ServiceBusMessage message : messages) { if (!messageBatch.tryAddMessage(message)) { final String error = String.format(Locale.US, "Messages exceed max allowed size for all the messages together. " + "Failed to add message at index '%s'.", index); throw logger.logExceptionAsError(new IllegalArgumentException(error)); } ++index; } return messageBatch; }) .flatMapMany(messageBatch -> connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMapMany(managementNode -> managementNode.schedule(messageBatch.getMessages(), scheduledEnqueueTime, messageBatch.getMaxSizeInBytes(), linkName.get(), transactionContext)) ); } /** * Cancels the enqueuing of a scheduled message, if it was not already enqueued. * * @param sequenceNumber of the scheduled message to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws IllegalArgumentException if {@code sequenceNumber} is negative. * @throws ServiceBusException If the messages could not be cancelled. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> cancelScheduledMessage(long sequenceNumber) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessage"))); } if (sequenceNumber < 0) { return monoError(logger, new IllegalArgumentException("'sequenceNumber' cannot be negative.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessages( Collections.singletonList(sequenceNumber), linkName.get())); } /** * Cancels the enqueuing of an already scheduled message, if it was not already enqueued. * * @param sequenceNumbers of the scheduled messages to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code sequenceNumbers} is null. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if the scheduled messages cannot cancelled. 
*/ public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessages"))); } if (Objects.isNull(sequenceNumbers)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessages(sequenceNumbers, linkName.get())); } /** * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with * {@link ServiceBusReceivedMessage} all operations that needs to be in this transaction. * * @return A new {@link ServiceBusTransactionContext}. * * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if a transaction cannot be created. * * @see ServiceBusReceiverAsyncClient */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on Service Bus resource. * * @throws IllegalStateException if sender is already disposed. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws ServiceBusException if the transaction could not be committed. * * @see ServiceBusReceiverAsyncClient */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "commitTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext Transaction to rollback. * * @return The {@link Mono} that finishes this operation on the Service Bus resource. * * @throws IllegalStateException if sender is already disposed. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws ServiceBusException if the transaction could not be rolled back. 
* * @see ServiceBusReceiverAsyncClient */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "rollbackTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client has a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } onClientClose.run(); } private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) { if (Objects.isNull(messages)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return createMessageBatch().flatMap(messageBatch -> { StreamSupport.stream(messages.spliterator(), false) .forEach(message -> messageBatch.tryAddMessage(message)); return sendInternal(messageBatch, transaction); }); } private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessage"))); } if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return monoError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return getSendLink() .flatMap(link -> link.getLinkSize().flatMap(size -> { int maxSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.schedule(Arrays.asList(message), scheduledEnqueueTime, maxSize, link.getLinkName(), transactionContext) .next()); })); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. */ private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage"))); } return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateMessageBatchOptions batchOptions = new CreateMessageBatchOptions() .setMaximumSizeInBytes(batchSize); return messages.collect(new AmqpMessageCollector(batchOptions, 1, link::getErrorContext, tracerProvider, messageSerializer, entityName, link.getHostname())); }) .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext))) .onErrorMap(this::mapError); } private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches, ServiceBusTransactionContext transactionContext) { return eventBatches .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext)) .then() .doOnError(error -> logger.error("Error sending batch.", error)); } private Mono<AmqpSendLink> getSendLink() { return connectionProcessor .flatMap(connection -> { if (!CoreUtils.isNullOrEmpty(viaEntityName)) { return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions, entityName); } else { return connection.createSendLink(entityName, entityName, retryOptions, null); } }) .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName())); } private Throwable mapError(Throwable throwable) { if (!(throwable instanceof ServiceBusException)) { return new ServiceBusException(throwable, ServiceBusErrorSource.SEND); } return throwable; } private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> { private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final MessageSerializer serializer; private final String entityPath; private final String hostname; private volatile ServiceBusMessageBatch currentBatch; AmqpMessageCollector(CreateMessageBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer, String entityPath, String hostname) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.serializer = serializer; this.entityPath = entityPath; this.hostname = hostname; currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer, entityPath, hostname); } @Override public Supplier<List<ServiceBusMessageBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() { return (list, event) -> { ServiceBusMessageBatch batch = currentBatch; if (batch.tryAddMessage(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer, entityPath, hostname); currentBatch.tryAddMessage(event); list.add(batch); }; } @Override public BinaryOperator<List<ServiceBusMessageBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() { return list -> { ServiceBusMessageBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class ServiceBusSenderAsyncClient implements AutoCloseable { /** * The default maximum allowable size, in bytes, for a batch to be sent. */ static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final String AZ_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private static final CreateMessageBatchOptions DEFAULT_BATCH_OPTIONS = new CreateMessageBatchOptions(); private static final String SERVICE_BASE_NAME = "ServiceBus."; private final ClientLogger logger = new ClientLogger(ServiceBusSenderAsyncClient.class); private final AtomicReference<String> linkName = new AtomicReference<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final MessagingEntityType entityType; private final Runnable onClientClose; private final String entityName; private final ServiceBusConnectionProcessor connectionProcessor; private final String viaEntityName; /** * Creates a new instance of this {@link ServiceBusSenderAsyncClient} that sends messages to a Service Bus entity. */ ServiceBusSenderAsyncClient(String entityName, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, String viaEntityName) { this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.entityName = Objects.requireNonNull(entityName, "'entityPath' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = tracerProvider; this.retryPolicy = getRetryPolicy(retryOptions); this.entityType = entityType; this.viaEntityName = viaEntityName; this.onClientClose = onClientClose; } /** * Gets the fully qualified namespace. * * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return connectionProcessor.getFullyQualifiedNamespace(); } /** * Gets the name of the Service Bus resource. * * @return The name of the Service Bus resource. */ public String getEntityPath() { return entityName; } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * * @return The {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code message} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or * the message could not be sent. */ public Mono<Void> sendMessage(ServiceBusMessage message) { if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } return sendInternal(Flux.just(message), null); } /** * Sends a message to a Service Bus queue or topic. * * @param message Message to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return The {@link Mono} the finishes this operation on service bus resource. 
* * @throws NullPointerException if {@code message}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code message} is larger than the maximum allowed size of a single message or * the message could not be sent. */ public Mono<Void> sendMessage(ServiceBusMessage message, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(Flux.just(message), transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages * exceed the maximum size of a single batch, an exception will be triggered and the send will fail. * By default, the message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if {@code messages} are larger than the maximum allowed size of a single message or * the message could not be sent. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendIterable(messages, transactionContext); } /** * Sends a set of messages to a Service Bus queue or topic using a batched approach. If the size of messages exceed * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the * message size is the max amount allowed on the link. * * @param messages Messages to be sent to Service Bus queue or topic. * * @return A {@link Mono} that completes when all messages have been sent to the Service Bus resource. * * @throws NullPointerException if {@code messages} is {@code null}. * @throws ServiceBusException if {@code messages} are larger than the maximum allowed size of a single message or * the message could not be sent. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> sendMessages(Iterable<ServiceBusMessage> messages) { return sendIterable(messages, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch} is {@code null}. * @throws ServiceBusException if the message batch could not be sent. * @throws IllegalStateException if sender is already disposed. 
*/ public Mono<Void> sendMessages(ServiceBusMessageBatch batch) { return sendInternal(batch, null); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. * * @throws NullPointerException if {@code batch}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws ServiceBusException if the message batch could not be sent. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> sendMessages(ServiceBusMessageBatch batch, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return sendInternal(batch, transactionContext); } /** * Creates a {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * * @return A {@link ServiceBusMessageBatch} that can fit as many messages as the transport allows. * @throws ServiceBusException if the message batch could not be created. * @throws IllegalStateException if sender is already disposed. */ public Mono<ServiceBusMessageBatch> createMessageBatch() { return createMessageBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link ServiceBusMessageBatch} configured with the options specified. * * @param options A set of options used to configure the {@link ServiceBusMessageBatch}. * * @return A new {@link ServiceBusMessageBatch} configured with the given options. * @throws NullPointerException if {@code options} is null. * @throws ServiceBusException if the message batch could not be created. * @throws IllegalStateException if sender is already disposed. * @throws IllegalArgumentException if {@link CreateMessageBatchOptions * maximum allowed size. */ public Mono<ServiceBusMessageBatch> createMessageBatch(CreateMessageBatchOptions options) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "createMessageBatch"))); } if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final int maxSize = options.getMaximumSizeInBytes(); return getSendLink().flatMap(link -> link.getLinkSize().flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (maxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateMessageBatchOptions.getMaximumSizeInBytes (%s bytes) is larger than the link size" + " (%s bytes).", maxSize, maximumLinkSize))); } final int batchSize = maxSize > 0 ? maxSize : maximumLinkSize; return Mono.just( new ServiceBusMessageBatch(batchSize, link::getErrorContext, tracerProvider, messageSerializer, entityName, getFullyQualifiedNamespace())); })).onErrorMap(this::mapError); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. 
* @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * @param transactionContext to be set on message before sending to Service Bus. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionID} is {@code null}. * @throws ServiceBusException If the message could not be scheduled. * @throws IllegalStateException if sender is already disposed. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return scheduleMessageInternal(message, scheduledEnqueueTime, transactionContext); } /** * Sends a scheduled message to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param message Message to be sent to the Service Bus Queue. * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * * @return The sequence number of the scheduled message which can be used to cancel the scheduling of the message. * * @throws NullPointerException if {@code message} or {@code scheduledEnqueueTime} is {@code null}. * @throws ServiceBusException If the message could not be scheduled. */ public Mono<Long> scheduleMessage(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime) { return scheduleMessageInternal(message, scheduledEnqueueTime, null); } /** * Sends a batch of scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled * message is enqueued and made available to receivers only at the scheduled enqueue time. * * @param messages Messages to be sent to the Service Bus queue or topic. * @param scheduledEnqueueTime OffsetDateTime at which the message should appear in the Service Bus queue or topic. * * @return Sequence numbers of the scheduled messages which can be used to cancel the messages. * * @throws NullPointerException If {@code messages} or {@code scheduledEnqueueTime} is {@code null}. * @throws ServiceBusException If the messages could not be scheduled. * @throws IllegalStateException if sender is already disposed. */ public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime) { return scheduleMessages(messages, scheduledEnqueueTime, null); } /** * Sends a scheduled messages to the Azure Service Bus entity this sender is connected to. A scheduled message is * enqueued and made available to receivers only at the scheduled enqueue time. * * @param messages Messages to be sent to the Service Bus Queue. * @param scheduledEnqueueTime OffsetDateTime at which the messages should appear in the Service Bus queue or topic. * @param transactionContext Transaction to associate with the operation. * * @return Sequence numbers of the scheduled messages which can be used to cancel the messages. 
* * @throws NullPointerException If {@code messages}, {@code scheduledEnqueueTime}, {@code transactionContext} or * {@code transactionContext.transactionId} is {@code null}. * @throws ServiceBusException If the messages could not be scheduled. * @throws IllegalStateException if sender is already disposed. */ public Flux<Long> scheduleMessages(Iterable<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessages"))); } if (Objects.isNull(messages)) { return fluxError(logger, new NullPointerException("'messages' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return fluxError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return createMessageBatch() .map(messageBatch -> { int index = 0; for (ServiceBusMessage message : messages) { if (!messageBatch.tryAddMessage(message)) { final String error = String.format(Locale.US, "Messages exceed max allowed size for all the messages together. " + "Failed to add message at index '%s'.", index); throw logger.logExceptionAsError(new IllegalArgumentException(error)); } ++index; } return messageBatch; }) .flatMapMany(messageBatch -> connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMapMany(managementNode -> managementNode.schedule(messageBatch.getMessages(), scheduledEnqueueTime, messageBatch.getMaxSizeInBytes(), linkName.get(), transactionContext)) ); } /** * Cancels the enqueuing of a scheduled message, if it was not already enqueued. * * @param sequenceNumber of the scheduled message to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws IllegalArgumentException if {@code sequenceNumber} is negative. * @throws ServiceBusException If the messages could not be cancelled. * @throws IllegalStateException if sender is already disposed. */ public Mono<Void> cancelScheduledMessage(long sequenceNumber) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessage"))); } if (sequenceNumber < 0) { return monoError(logger, new IllegalArgumentException("'sequenceNumber' cannot be negative.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessages( Collections.singletonList(sequenceNumber), linkName.get())); } /** * Cancels the enqueuing of an already scheduled message, if it was not already enqueued. * * @param sequenceNumbers of the scheduled messages to cancel. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code sequenceNumbers} is null. * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if the scheduled messages cannot cancelled. 
*/ public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "cancelScheduledMessages"))); } if (Objects.isNull(sequenceNumbers)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.cancelScheduledMessages(sequenceNumbers, linkName.get())); } /** * Starts a new transaction on Service Bus. The {@link ServiceBusTransactionContext} should be passed along with * {@link ServiceBusReceivedMessage} all operations that needs to be in this transaction. * * @return A new {@link ServiceBusTransactionContext}. * * @throws IllegalStateException if sender is already disposed. * @throws ServiceBusException if a transaction cannot be created. * * @see ServiceBusReceiverAsyncClient */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on Service Bus resource. * * @throws IllegalStateException if sender is already disposed. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws ServiceBusException if the transaction could not be committed. * * @see ServiceBusReceiverAsyncClient */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "commitTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * * @param transactionContext Transaction to rollback. * * @return The {@link Mono} that finishes this operation on the Service Bus resource. * * @throws IllegalStateException if sender is already disposed. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws ServiceBusException if the transaction could not be rolled back. 
* * @see ServiceBusReceiverAsyncClient */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "rollbackTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the {@link ServiceBusSenderAsyncClient}. If the client has a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } onClientClose.run(); } private Mono<Void> sendIterable(Iterable<ServiceBusMessage> messages, ServiceBusTransactionContext transaction) { if (Objects.isNull(messages)) { return monoError(logger, new NullPointerException("'messages' cannot be null.")); } return createMessageBatch().flatMap(messageBatch -> { StreamSupport.stream(messages.spliterator(), false) .forEach(message -> messageBatch.tryAddMessage(message)); return sendInternal(messageBatch, transaction); }); } private Mono<Long> scheduleMessageInternal(ServiceBusMessage message, OffsetDateTime scheduledEnqueueTime, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "scheduleMessage"))); } if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } if (Objects.isNull(scheduledEnqueueTime)) { return monoError(logger, new NullPointerException("'scheduledEnqueueTime' cannot be null.")); } return getSendLink() .flatMap(link -> link.getLinkSize().flatMap(size -> { int maxSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityName, entityType)) .flatMap(managementNode -> managementNode.schedule(Arrays.asList(message), scheduledEnqueueTime, maxSize, link.getLinkName(), transactionContext) .next()); })); } /** * Sends a message batch to the Azure Service Bus entity this sender is connected to. * @param batch of messages which allows client to send maximum allowed size for a batch of messages. * @param transactionContext to be set on batch message before sending to Service Bus. * * @return A {@link Mono} the finishes this operation on service bus resource. */ private Mono<Void> sendInternal(Flux<ServiceBusMessage> messages, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_SENDER, "sendMessage"))); } return withRetry(getSendLink(), retryOptions, "Failed to create send link " + linkName) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateMessageBatchOptions batchOptions = new CreateMessageBatchOptions() .setMaximumSizeInBytes(batchSize); return messages.collect(new AmqpMessageCollector(batchOptions, 1, link::getErrorContext, tracerProvider, messageSerializer, entityName, link.getHostname())); }) .flatMap(list -> sendInternalBatch(Flux.fromIterable(list), transactionContext))) .onErrorMap(this::mapError); } private Mono<Void> sendInternalBatch(Flux<ServiceBusMessageBatch> eventBatches, ServiceBusTransactionContext transactionContext) { return eventBatches .flatMap(messageBatch -> sendInternal(messageBatch, transactionContext)) .then() .doOnError(error -> logger.error("Error sending batch.", error)); } private Mono<AmqpSendLink> getSendLink() { return connectionProcessor .flatMap(connection -> { if (!CoreUtils.isNullOrEmpty(viaEntityName)) { return connection.createSendLink("VIA-".concat(viaEntityName), viaEntityName, retryOptions, entityName); } else { return connection.createSendLink(entityName, entityName, retryOptions, null); } }) .doOnNext(next -> linkName.compareAndSet(null, next.getLinkName())); } private Throwable mapError(Throwable throwable) { if (!(throwable instanceof ServiceBusException)) { return new ServiceBusException(throwable, ServiceBusErrorSource.SEND); } return throwable; } private static class AmqpMessageCollector implements Collector<ServiceBusMessage, List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> { private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final MessageSerializer serializer; private final String entityPath; private final String hostname; private volatile ServiceBusMessageBatch currentBatch; AmqpMessageCollector(CreateMessageBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer, String entityPath, String hostname) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.serializer = serializer; this.entityPath = entityPath; this.hostname = hostname; currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer, entityPath, hostname); } @Override public Supplier<List<ServiceBusMessageBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<ServiceBusMessageBatch>, ServiceBusMessage> accumulator() { return (list, event) -> { ServiceBusMessageBatch batch = currentBatch; if (batch.tryAddMessage(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new ServiceBusMessageBatch(maxMessageSize, contextProvider, tracerProvider, serializer, entityPath, hostname); currentBatch.tryAddMessage(event); list.add(batch); }; } @Override public BinaryOperator<List<ServiceBusMessageBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<ServiceBusMessageBatch>, List<ServiceBusMessageBatch>> finisher() { return list -> { ServiceBusMessageBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Use try-with-resources instead of try/finally here, so the PFX certificate stream is always closed and an exception thrown by `close()` is attached as a suppressed exception rather than masking the original failure. A minimal sketch of the two patterns follows.
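For illustration only, a sketch of the pattern under discussion, reusing the names already present in this method (`getCertificateInputStream()`, `certificatePassword`, `credential`, and MSAL4J's `ClientCredentialFactory.createFromCertificate(InputStream, String)`); the try/finally variant is hypothetical and shown only for contrast:

```java
// Hypothetical try/finally variant: the stream must be closed manually,
// and an IOException from close() can mask the exception thrown inside
// the try block.
InputStream pfxCertificateStream = getCertificateInputStream();
try {
    credential = ClientCredentialFactory.createFromCertificate(
        pfxCertificateStream, certificatePassword);
} finally {
    pfxCertificateStream.close();
}

// try-with-resources: the compiler generates the cleanup. The stream is
// closed automatically when the block exits, and a failure in close() is
// recorded as a suppressed exception instead of replacing the primary one.
try (InputStream pfxCertificateStream = getCertificateInputStream()) {
    credential = ClientCredentialFactory.createFromCertificate(
        pfxCertificateStream, certificatePassword);
}
```

This works because `InputStream` implements `AutoCloseable`; in either variant, an `IOException` still propagates to the surrounding `catch (IOException | GeneralSecurityException e)` block of this method.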
private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); }
try (InputStream pfxCertificateStream = getCertificateInputStream()) {
private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw 
LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. 
It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
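* <p>The silent path below only reuses a cached token while it is at least REFRESH_OFFSET (five minutes) from expiry; otherwise the filter lets the Mono complete empty and the caller falls back to a fresh acquisition. A minimal sketch of that freshness check (names assumed):</p> * <pre>{@code boolean fresh = OffsetDateTime.now() .isBefore(token.getExpiresAt().minus(Duration.ofMinutes(5))); }</pre>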
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user-provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with the Visual Studio Code cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. * * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive the security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets a token from the shared token cache. * * @param request the details of the token request * @param username the username of the cached account to use, or null when a single cached account is expected * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
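* <p>The Azure Arc flow below is a two-step challenge: the first, unauthenticated GET is expected to fail with a 401 whose {@code WWW-Authenticate} header names a local secret file; the file's contents are then presented as a Basic authorization value on a second GET. Hypothetical shape of the challenge handling:</p> * <pre>{@code String realm = connection.getHeaderField("WWW-Authenticate"); String secretKeyPath = realm.substring(realm.indexOf("=") + 1); String secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); }</pre>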
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); /* Reading the body of this unauthenticated request is expected to fail with a 401 whose WWW-Authenticate header names the local secret file; the result is intentionally discarded. */ new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from Azure Active Directory by exchanging a signed client assertion for an access token. 
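* <p>The exchange below POSTs a form-encoded body to the tenant's token endpoint; a sketch of the payload it assembles (the assertion value is a placeholder):</p> * <pre>{@code String body = "client_assertion=" + assertionToken + "&client_assertion_type=urn:ietf:params:oauth:client-assertion-type:jwt-bearer" + "&client_id=" + clientId + "&grant_type=client_credentials" + "&scope=" + URLEncoder.encode(scope, "UTF-8"); }</pre>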
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param thumbprint the thumbprint used to validate the TLS certificate presented by the Service Fabric identity endpoint * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
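* <p>Note on the headers used below: with endpoint version 2019-08-01 the shared secret travels in an {@code X-IDENTITY-HEADER} header, while older endpoint versions expect a {@code Secret} header instead. Sketch of that selection:</p> * <pre>{@code if ("2019-08-01".equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } }</pre>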
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
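* <p>The IMDS path below treats 404, 410, 429 and 5xx responses as retryable with a configurable backoff, stretching any 410 wait to at least 70 seconds to ride out IMDS upgrades; other failures surface immediately. A compressed sketch of that policy (names assumed):</p> * <pre>{@code boolean retryable = code == 404 || code == 410 || code == 429 || (code >= 500 && code <= 599); long waitMs = Math.max(backoffMs, code == 410 ? 70_000L : 0L); }</pre>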
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = (int) options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).toMillis(); /* toMillis(), not getNano() / 1000, which would yield microseconds rather than milliseconds */ retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
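// The managed identity endpoints above (Azure Arc, Service Fabric, App Service, IMDS) all assemble
// the same kind of URL-encoded query string: resource and api-version, plus client_id or mi_res_id
// when a user-assigned identity is configured. A self-contained, hypothetical sketch of that step;
// the class and method names below are illustrative and not part of IdentityClient:
final class MsiQuerySketch {
    static String buildQuery(String resource, String apiVersion, String clientId, String resourceId)
        throws java.io.UnsupportedEncodingException {
        StringBuilder payload = new StringBuilder();
        payload.append("resource=").append(java.net.URLEncoder.encode(resource, "UTF-8"));
        payload.append("&api-version=").append(java.net.URLEncoder.encode(apiVersion, "UTF-8"));
        if (clientId != null) { // user-assigned managed identity selected by client id
            payload.append("&client_id=").append(java.net.URLEncoder.encode(clientId, "UTF-8"));
        }
        if (resourceId != null) { // user-assigned managed identity selected by Azure resource id
            payload.append("&mi_res_id=").append(java.net.URLEncoder.encode(resourceId, "UTF-8"));
        }
        return payload.toString();
    }

    public static void main(String[] args) throws Exception {
        // e.g. the scope "https://vault.azure.net/.default" collapses to the resource below
        System.out.println(buildQuery("https://vault.azure.net", "2018-02-01", null, null));
    }
}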
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param clientAssertionFilePath the path to the file containing the client assertion. * @param clientAssertionSupplier the supplier that produces the client assertion. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicates whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." )); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw 
LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication => Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please log in with the Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append(" --tenant ").append(tenant); /* note the leading space: without it the tenant flag fuses with the resource value */ } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A safe working directory could not be" + " found to execute the CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed. 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
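The Azure Arc flow in the class above is a two-step challenge handshake: an unauthenticated GET to the identity endpoint is expected to fail with a 401 carrying a `WWW-Authenticate: Basic realm=<file>` header, the credential reads the secret out of that local file, and a second GET proves local access by echoing the secret back in a `Basic` authorization header. A minimal standalone sketch of that handshake, using only `java.net` (`fetchArcToken` is a hypothetical helper, not part of `IdentityClient`, and the caller is assumed to supply an already URL-encoded resource):

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Scanner;

public final class ArcChallengeSketch {

    // Hypothetical helper; identityEndpoint and resource come from the caller,
    // with resource assumed to be URL-encoded already.
    static String fetchArcToken(String identityEndpoint, String resource) throws IOException {
        URL url = new URL(identityEndpoint + "?resource=" + resource + "&api-version=2019-11-01");

        // Step 1: the unauthenticated probe. Arc answers 401 with a header of
        // the form "Basic realm=<path-to-secret-file>".
        HttpURLConnection probe = (HttpURLConnection) url.openConnection();
        probe.setRequestProperty("Metadata", "true");
        String realm = probe.getHeaderField("WWW-Authenticate");
        probe.disconnect();
        if (realm == null || realm.indexOf('=') == -1) {
            throw new IOException("Azure Arc endpoint did not return a Basic realm challenge");
        }

        // The realm points at a file only privileged local processes can read;
        // being able to read it is the identity proof.
        String secretPath = realm.substring(realm.indexOf('=') + 1);
        String secret = new String(Files.readAllBytes(Paths.get(secretPath)), StandardCharsets.UTF_8);

        // Step 2: repeat the request, echoing the secret back as a Basic header.
        HttpURLConnection authorized = (HttpURLConnection) url.openConnection();
        authorized.setRequestProperty("Metadata", "true");
        authorized.setRequestProperty("Authorization", "Basic " + secret);
        try (Scanner s = new Scanner(authorized.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A")) {
            return s.hasNext() ? s.next() : ""; // raw JSON token response
        } finally {
            authorized.disconnect();
        }
    }
}
```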
Remove the string concatenation by using two `append` calls.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); }
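One subtle step in the method above is normalizing the CLI's `expiresOn` value: `az account get-access-token` reports a zoneless local timestamp with fractional seconds, so the code trims the fraction, rejoins date and time with `T`, parses the result as an ISO local date-time, attaches the system zone, and converts to UTC. A standalone sketch of just that conversion (the sample timestamp is invented):

```java
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public final class AzCliExpiresOnSketch {
    public static void main(String[] args) {
        // Shape of the CLI's "expiresOn" field: local time, no zone, with fraction.
        String time = "2024-01-30 12:15:40.123456";

        // Drop the fractional seconds (assumed present, as in the method above)
        // and rejoin date and time with 'T' so the value parses as ISO.
        String timeToSecond = time.substring(0, time.indexOf("."));
        String timeJoinedWithT = String.join("T", timeToSecond.split(" "));

        // Interpret the local timestamp in the system zone, then normalize to UTC.
        OffsetDateTime expiresOn = LocalDateTime
            .parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
            .atZone(ZoneId.systemDefault())
            .toOffsetDateTime()
            .withOffsetSameInstant(ZoneOffset.UTC);

        System.out.println(expiresOn); // e.g. 2024-01-30T04:15:40Z, depending on the zone
    }
}
```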
azCommand.append("--tenant ").append(tenant);
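The suggestion replaces a single `append` of a concatenated string with two chained appends: the concatenated form compiles to building a throwaway intermediate `String` that is immediately copied into the builder. A minimal before/after illustration (the tenant value is invented):

```java
StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
String tenant = "contoso.onmicrosoft.com"; // illustrative value

// Before: allocates an intermediate String just to hand it to append.
// azCommand.append("--tenant " + tenant);

// After: both pieces go straight into the builder's internal buffer.
azCommand.append("--tenant ").append(tenant);
```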
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); }
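Both versions of the method launch the CLI through a shell rather than invoking it directly: the assembled command string is handed to `cmd.exe /c` on Windows and `/bin/sh -c` elsewhere, with stderr merged into stdout and the working directory pinned before starting the process. A runnable sketch of that launch convention (the echoed command and the home-directory choice are placeholders, not what the credential uses):

```java
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public final class ShellLaunchSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Mirrors the starter/switcher selection in the method above.
        boolean windows = System.getProperty("os.name").contains("Windows");
        String starter = windows ? "cmd.exe" : "/bin/sh";
        String switcher = windows ? "/c" : "-c";

        ProcessBuilder builder = new ProcessBuilder(starter, switcher, "echo hello");
        builder.directory(new File(System.getProperty("user.home"))); // placeholder directory
        builder.redirectErrorStream(true); // interleave stderr with stdout

        Process process = builder.start();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        }
        process.waitFor();
    }
}
```

Pinning the working directory matters on Windows because `cmd.exe` resolves commands against the current directory before the PATH; running from a fixed, trusted system path avoids executing a planted `az` binary, which is why the method above refuses to run at all when no safe working directory can be found.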
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. 
It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. * * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
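
authenticateWithBrowserInteraction above assembles the redirect URI from the port (falling back to the localhost default) and hands it to MSAL, which runs the loopback listener and opens the system browser itself. A minimal sketch with placeholder values:

```java
import com.microsoft.aad.msal4j.IAuthenticationResult;
import com.microsoft.aad.msal4j.InteractiveRequestParameters;
import com.microsoft.aad.msal4j.Prompt;
import com.microsoft.aad.msal4j.PublicClientApplication;

import java.net.URI;
import java.util.Collections;

public class BrowserSketch {
    public static void main(String[] args) throws Exception {
        // The app registration must list the loopback address as a valid reply URL.
        PublicClientApplication pca = PublicClientApplication
            .builder("<client-id>")
            .authority("https://login.microsoftonline.com/organizations/")
            .build();

        InteractiveRequestParameters parameters = InteractiveRequestParameters
            .builder(new URI("http://localhost"))
            .scopes(Collections.singleton("https://graph.microsoft.com/.default"))
            .prompt(Prompt.SELECT_ACCOUNT)
            .build();

        IAuthenticationResult result = pca.acquireToken(parameters).join();
        System.out.println("Signed in as " + result.account().username());
    }
}
```
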
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
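
The Azure Arc exchange above is a deliberate two-step: the first unauthenticated request is expected to fail with 401, and the WWW-Authenticate header names a secret file that only privileged local processes can read. A stripped-down sketch of the same challenge protocol, with no SDK types and error handling mostly elided (endpoint and resource are caller-supplied, and the resource is assumed to be URL-encoded already):

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Scanner;

public class ArcChallengeSketch {
    static String getTokenJson(String endpoint, String resource) throws IOException {
        String query = endpoint + "?resource=" + resource + "&api-version=2019-11-01";

        // First request carries no secret: Arc answers 401 with a header of the
        // form "Basic realm=<path-to-secret-file>". A robust caller also checks
        // for a null header and a non-401 status.
        HttpURLConnection probe = (HttpURLConnection) new URL(query).openConnection();
        probe.setRequestProperty("Metadata", "true");
        String challenge = probe.getHeaderField("WWW-Authenticate");
        probe.disconnect();
        String secretPath = challenge.substring(challenge.indexOf('=') + 1);
        String secret = new String(Files.readAllBytes(Paths.get(secretPath)), StandardCharsets.UTF_8);

        // Second request presents the file contents as a Basic credential.
        HttpURLConnection conn = (HttpURLConnection) new URL(query).openConnection();
        conn.setRequestProperty("Metadata", "true");
        conn.setRequestProperty("Authorization", "Basic " + secret);
        try (Scanner s = new Scanner(conn.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A")) {
            return s.hasNext() ? s.next() : "";
        } finally {
            conn.disconnect();
        }
    }
}
```
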
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
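
authenticateWithExchangeToken above redeems a client assertion (a signed JWT, for example a federated workload identity token) directly against the v2.0 token endpoint. A bare-bones sketch of that POST; tenant, client id, assertion, and scope are caller-supplied placeholders:

```java
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.Scanner;

public class ClientAssertionPostSketch {
    static String redeemAssertion(String tenantId, String clientId, String assertion, String scope)
        throws IOException {
        URL url = new URL("https://login.microsoftonline.com/" + tenantId + "/oauth2/v2.0/token");
        String body = "client_assertion=" + URLEncoder.encode(assertion, StandardCharsets.UTF_8.name())
            + "&client_assertion_type="
            + URLEncoder.encode("urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
                StandardCharsets.UTF_8.name())
            + "&client_id=" + clientId
            + "&grant_type=client_credentials"
            + "&scope=" + URLEncoder.encode(scope, StandardCharsets.UTF_8.name());

        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
        conn.setDoOutput(true);
        try (DataOutputStream out = new DataOutputStream(conn.getOutputStream())) {
            out.write(body.getBytes(StandardCharsets.UTF_8));
        }
        // The response JSON carries access_token and expires_in fields.
        try (Scanner s = new Scanner(conn.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A")) {
            return s.hasNext() ? s.next() : "";
        } finally {
            conn.disconnect();
        }
    }
}
```
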
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
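
The Service Fabric method above differs from the other managed identity endpoints mainly in TLS handling: the endpoint presents a self-signed certificate, so IdentitySslUtil pins it by thumbprint instead of relying on the default trust store. The following is a rough illustration of what such pinning involves, not the SDK's actual IdentitySslUtil implementation:

```java
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import java.security.MessageDigest;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;

public final class ThumbprintPinningSketch {
    // Installs a trust manager that accepts exactly one server certificate,
    // identified by its hex-encoded SHA-1 thumbprint. A self-signed certificate
    // would otherwise be rejected during the handshake.
    public static void pin(HttpsURLConnection connection, String expectedThumbprint) throws Exception {
        TrustManager pinned = new X509TrustManager() {
            @Override
            public void checkClientTrusted(X509Certificate[] chain, String authType) {
            }

            @Override
            public void checkServerTrusted(X509Certificate[] chain, String authType)
                throws CertificateException {
                try {
                    byte[] digest = MessageDigest.getInstance("SHA-1").digest(chain[0].getEncoded());
                    StringBuilder hex = new StringBuilder();
                    for (byte b : digest) {
                        hex.append(String.format("%02X", b));
                    }
                    if (!hex.toString().equalsIgnoreCase(expectedThumbprint)) {
                        throw new CertificateException("Server certificate thumbprint mismatch");
                    }
                } catch (java.security.NoSuchAlgorithmException e) {
                    throw new CertificateException(e);
                }
            }

            @Override
            public X509Certificate[] getAcceptedIssuers() {
                return new X509Certificate[0];
            }
        };
        SSLContext context = SSLContext.getInstance("TLS");
        context.init(null, new TrustManager[] { pinned }, null);
        connection.setSSLSocketFactory(context.getSocketFactory());
        // A certificate without a matching SAN also needs a relaxed hostname check.
        connection.setHostnameVerifier((host, session) -> true);
    }
}
```
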
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
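
authenticateToManagedIdentityEndpoint above targets the 2019-08-01 App Service identity endpoint, proving it runs inside the app by echoing back the X-IDENTITY-HEADER value the platform injects. A standalone sketch reading the standard App Service environment variables:

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.Scanner;

public class AppServiceMsiSketch {
    // IDENTITY_ENDPOINT and IDENTITY_HEADER are injected by the App Service platform.
    static String getTokenJson(String resource) throws IOException {
        String endpoint = System.getenv("IDENTITY_ENDPOINT");
        String header = System.getenv("IDENTITY_HEADER");
        URL url = new URL(endpoint + "?resource="
            + URLEncoder.encode(resource, StandardCharsets.UTF_8.name())
            + "&api-version=2019-08-01");

        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("X-IDENTITY-HEADER", header); // proves the caller is in-app
        conn.setRequestProperty("Metadata", "true");
        try (Scanner s = new Scanner(conn.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A")) {
            return s.hasNext() ? s.next() : "";
        } finally {
            conn.disconnect();
        }
    }
}
```
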
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
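
The IMDS path in authenticateToIMDSEndpoint above retries 404, 410, 429, and 5xx responses with randomized backoff, stretching 410 waits to at least 70 seconds while IMDS upgrades. One subtlety worth noting: Duration.getNano() returns only the sub-second nanosecond component, so a wait computed from it is not a millisecond count. The sketch below, a simplified stand-in with a capped jittered exponential backoff rather than the SDK's configurable retry function, uses toMillis() instead:

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Random;
import java.util.Scanner;

public class ImdsRetrySketch {
    private static final Random RANDOM = new Random();

    // Polls the well-known IMDS token endpoint until it answers or retries run out.
    static String getTokenJson(String resource, int maxRetry) throws IOException, InterruptedException {
        String query = "http://169.254.169.254/metadata/identity/oauth2/token"
            + "?api-version=2018-02-01&resource="
            + URLEncoder.encode(resource, StandardCharsets.UTF_8.name());

        for (int retry = 1; retry <= maxRetry; retry++) {
            HttpURLConnection conn = (HttpURLConnection) new URL(query).openConnection();
            try {
                conn.setRequestProperty("Metadata", "true");
                try (Scanner s = new Scanner(conn.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A")) {
                    return s.hasNext() ? s.next() : "";
                }
            } catch (IOException e) {
                int status = conn.getResponseCode();
                boolean retryable = status == 404 || status == 410 || status == 429
                    || (status >= 500 && status <= 599);
                if (!retryable || retry == maxRetry) {
                    throw e;
                }
                // toMillis() gives the whole duration in milliseconds; getNano()
                // would give only the fractional-second part, in nanoseconds.
                long backoffMs = Duration.ofSeconds(1L << retry).toMillis() + RANDOM.nextInt(500);
                Thread.sleep(backoffMs);
            } finally {
                conn.disconnect();
            }
        }
        throw new IOException("IMDS: failed to acquire token after " + maxRetry + " retries");
    }
}
```
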
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. 
 * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure PowerShell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using the default" + " PowerShell (pwsh) with the following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using legacy Windows PowerShell (powershell)" + " with the following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token on behalf of another user via the On-Behalf-Of flow, using the user assertion held by the client options. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Accounts module with version >= 2.2.0 is not installed. 
It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.Accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure PowerShell Authentication => Executing the command `{}` in Azure " + "PowerShell to retrieve the Access Token.", command); return manager.runCommand(command) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure PowerShell Authentication => Attempting to deserialize the " + "received response from Azure PowerShell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure PowerShell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a confidential client application. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); }
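
For reference, the Get-AzAccessToken output that getAccessTokenFromPowerShell deserializes is a small JSON object with Token and ExpiresOn fields. A sketch of the same parsing with plain Jackson; the JSON literal is illustrative, not captured output:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.time.OffsetDateTime;
import java.time.ZoneOffset;

public class PsOutputParseSketch {
    public static void main(String[] args) throws Exception {
        // Shape of `Get-AzAccessToken -ResourceUrl <resource> | ConvertTo-Json` (abridged).
        String json = "{\"Token\":\"eyJ...\",\"ExpiresOn\":\"2030-01-01T00:00:00+00:00\"}";

        JsonNode node = new ObjectMapper().readTree(json);
        String token = node.get("Token").asText();
        OffsetDateTime expiresOn = OffsetDateTime.parse(node.get("ExpiresOn").asText())
            .withOffsetSameInstant(ZoneOffset.UTC);

        System.out.println("Token length " + token.length() + ", expires " + expiresOn);
    }
}
```
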
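
authenticateWithConfidentialClient above is the client-credentials grant. Reduced to a standalone MSAL4J sketch, with placeholder registration values:

```java
import com.microsoft.aad.msal4j.ClientCredentialFactory;
import com.microsoft.aad.msal4j.ClientCredentialParameters;
import com.microsoft.aad.msal4j.ConfidentialClientApplication;
import com.microsoft.aad.msal4j.IAuthenticationResult;

import java.util.Collections;

public class ClientSecretSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder tenant, client id, and secret; certificates or client
        // assertions plug in through the same IClientCredential abstraction.
        ConfidentialClientApplication cca = ConfidentialClientApplication
            .builder("<client-id>", ClientCredentialFactory.createFromSecret("<client-secret>"))
            .authority("https://login.microsoftonline.com/<tenant-id>/")
            .build();

        ClientCredentialParameters parameters = ClientCredentialParameters
            .builder(Collections.singleton("https://graph.microsoft.com/.default"))
            .build();

        IAuthenticationResult result = cca.acquireToken(parameters).join();
        System.out.println("Token expires at " + result.expiresOnDate());
    }
}
```
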
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
Use try-with-resources to remove the try/finally; also, the finally block had a return, which doesn't make sense.
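For reference, a minimal, self-contained sketch of the try-with-resources refactor this comment asks for (the `TryWithResourcesExample` class, the `readProcessOutput` helper, and the `echo` command are illustrative assumptions, not SDK code):

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public final class TryWithResourcesExample {

    // Hypothetical helper (not part of the SDK): reads a process's output.
    // Declaring the reader in the try header closes it automatically on every
    // exit path, so no finally block (and no return hidden inside one) is needed.
    static String readProcessOutput(Process process) throws IOException {
        StringBuilder output = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                output.append(line);
            }
        }
        return output.toString();
    }

    public static void main(String[] args) throws IOException {
        Process process = new ProcessBuilder("echo", "hello").start();
        System.out.println(readProcessOutput(process));
    }
}
```

The design point is that `close()` is guaranteed to run whether the block completes normally or throws, so cleanup never needs a hand-written finally block, and control flow such as a return never belongs inside one (a return in finally silently swallows any in-flight exception).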
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); }
try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(),
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. 
It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. * * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
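
The `authenticateWithBrowserInteraction` method above resolves its redirect URI by precedence: an explicit port wins, then an explicit `redirectUrl`, then a bare localhost address. A minimal sketch of that resolution, assuming the truncated `HTTP_LOCALHOST` constant is `"http://localhost"` (the listing elides everything after the scheme); the class and method names here are invented for illustration:

```java
import java.net.URI;
import java.net.URISyntaxException;

final class RedirectResolutionSketch {
    // Assumed value of the constant truncated in the listing above.
    private static final String HTTP_LOCALHOST = "http://localhost";

    static URI resolveRedirect(Integer port, String redirectUrl) throws URISyntaxException {
        String redirect;
        if (port != null) {
            redirect = HTTP_LOCALHOST + ":" + port;  // e.g. http://localhost:8765
        } else if (redirectUrl != null) {
            redirect = redirectUrl;                  // caller-supplied reply URL
        } else {
            redirect = HTTP_LOCALHOST;               // no port: left to the interactive flow
        }
        return new URI(redirect);
    }

    public static void main(String[] args) throws URISyntaxException {
        System.out.println(resolveRedirect(8765, null)); // http://localhost:8765
        System.out.println(resolveRedirect(null, null)); // http://localhost
    }
}
```

When neither a port nor a URL is supplied, the bare localhost form leaves local port selection to the underlying MSAL interactive flow.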
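
`authenticateWithSharedTokenCache` above narrows the cached accounts in two steps: filter by the requested username when one is given, then de-duplicate by home account id, erroring out unless exactly one candidate survives. A self-contained sketch of that selection, using an invented `CachedAccount` stand-in for msal4j's `IAccount`:

```java
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

final class SharedCacheSelectionSketch {
    /** Illustrative stand-in for msal4j's IAccount; only the fields the filter needs. */
    static final class CachedAccount {
        final String homeAccountId;
        final String username;
        CachedAccount(String homeAccountId, String username) {
            this.homeAccountId = homeAccountId;
            this.username = username;
        }
    }

    static CachedAccount selectAccount(Collection<CachedAccount> cached, String username) {
        Map<String, CachedAccount> byHomeId = new HashMap<>();
        for (CachedAccount account : cached) {
            if (username == null || username.equals(account.username)) {
                byHomeId.putIfAbsent(account.homeAccountId, account); // first hit per home account wins
            }
        }
        if (byHomeId.isEmpty()) {
            throw new IllegalStateException("No matching account in the shared cache.");
        }
        if (byHomeId.size() > 1) {
            throw new IllegalStateException("Multiple matching accounts; use username/tenant to disambiguate.");
        }
        return byHomeId.values().iterator().next();
    }
}
```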
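
`authenticateToArcManagedIdentityEndpoint` above implements the Azure Arc challenge handshake: an unauthenticated GET is expected to fail with 401 plus a `WWW-Authenticate` header whose `realm` names a local secret file, and that file's contents are replayed as a `Basic` authorization header on a second GET. A condensed sketch of just the handshake; the endpoint URL and class name are placeholders, and the real method additionally validates the 401 status and deserializes the JSON into `MSIToken`:

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Scanner;

final class ArcChallengeSketch {
    static String fetchToken(String tokenUrl) throws IOException {
        HttpURLConnection first = (HttpURLConnection) new URL(tokenUrl).openConnection();
        first.setRequestProperty("Metadata", "true");
        String secret = null;
        try {
            first.getInputStream().close();   // a 401 challenge is the expected outcome here
        } catch (IOException expected) {
            String realm = first.getHeaderField("WWW-Authenticate");
            if (realm == null || realm.indexOf('=') < 0) {
                throw new IOException("Arc endpoint did not return a usable WWW-Authenticate challenge");
            }
            String secretPath = realm.substring(realm.indexOf('=') + 1); // path to local key file
            secret = new String(Files.readAllBytes(Paths.get(secretPath)), StandardCharsets.UTF_8);
        } finally {
            first.disconnect();
        }
        if (secret == null) {
            throw new IOException("Arc endpoint unexpectedly served the unauthenticated request");
        }
        HttpURLConnection second = (HttpURLConnection) new URL(tokenUrl).openConnection();
        second.setRequestProperty("Metadata", "true");
        second.setRequestProperty("Authorization", "Basic " + secret);
        try (Scanner s = new Scanner(second.getInputStream(), StandardCharsets.UTF_8.name())
                 .useDelimiter("\\A")) {
            return s.hasNext() ? s.next() : ""; // raw JSON token response
        } finally {
            second.disconnect();
        }
    }
}
```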
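
`authenticateWithExchangeToken` above builds a `client_credentials` form body carrying the federated client assertion and posts it to `<authorityHost>/<tenantId>/oauth2/v2.0/token`. A sketch of the body construction with dummy values; unlike the listing, it also URL-encodes the assertion-type URN, which is the slightly more defensive choice:

```java
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

final class TokenExchangeBodySketch {
    static String buildBody(String assertion, String clientId, String scope)
        throws UnsupportedEncodingException {
        return "client_assertion=" + assertion
            + "&client_assertion_type=" + URLEncoder.encode(
                "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", StandardCharsets.UTF_8.name())
            + "&client_id=" + clientId
            + "&grant_type=client_credentials"
            + "&scope=" + URLEncoder.encode(scope, StandardCharsets.UTF_8.name());
    }

    public static void main(String[] args) throws UnsupportedEncodingException {
        // Dummy values for illustration only.
        System.out.println(buildBody("<federated-jwt>", "00000000-0000-0000-0000-000000000000",
            "https://management.azure.com/.default"));
    }
}
```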
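
`authenticateToServiceFabricManagedIdentityEndpoint` above pins the endpoint's self-signed certificate through `IdentitySslUtil.addTrustedCertificateThumbprint`, an internal helper whose mechanism is not shown in this listing. As an assumption-laden illustration only, a thumbprint check amounts to comparing the SHA-1 digest of the presented leaf certificate with the expected hex string once the connection is established:

```java
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.cert.Certificate;
import java.security.cert.CertificateEncodingException;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLPeerUnverifiedException;

final class ThumbprintCheckSketch {
    // Call only after connection.connect(); the SDK helper's actual wiring may differ.
    static void verifyThumbprint(HttpsURLConnection connection, String expectedHex)
        throws SSLPeerUnverifiedException, CertificateEncodingException, NoSuchAlgorithmException {
        Certificate serverCert = connection.getServerCertificates()[0]; // leaf certificate
        byte[] digest = MessageDigest.getInstance("SHA-1").digest(serverCert.getEncoded());
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02X", b));
        }
        if (!hex.toString().equalsIgnoreCase(expectedHex)) {
            throw new SSLPeerUnverifiedException("Server certificate thumbprint did not match: " + hex);
        }
    }
}
```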
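
`authenticateToManagedIdentityEndpoint` above switches authentication headers by endpoint contract: the 2019-08-01 `IDENTITY_ENDPOINT` flavor expects `X-IDENTITY-HEADER`, while the older 2017-09-01 `MSI_ENDPOINT` flavor used `Secret`. A sketch of that branch:

```java
import java.net.HttpURLConnection;

final class ManagedIdentityHeaderSketch {
    static void applyIdentityHeader(HttpURLConnection connection, String apiVersion, String headerValue) {
        if (headerValue != null) {
            if ("2019-08-01".equals(apiVersion)) {
                connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); // newer contract
            } else {
                connection.setRequestProperty("Secret", headerValue);            // legacy contract
            }
        }
        connection.setRequestProperty("Metadata", "true"); // required by the endpoint
    }
}
```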
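
The retry loop in `authenticateToIMDSEndpoint` above retries 404, 410, 429 and 5xx responses with a randomized, growing backoff, and enforces a 70-second floor on 410 while IMDS upgrades. One detail worth noting: the listing derives the wait from `Duration.getNano() / 1000`, which yields microseconds rather than milliseconds. The sketch below is an illustration rather than the SDK's code and uses `Duration.toMillis()` for the intended unit:

```java
import java.time.Duration;
import java.util.Random;
import java.util.function.Function;

final class ImdsBackoffSketch {
    private static final Random RANDOM = new Random();
    private static final long IMDS_UPGRADE_TIME_IN_MS = 70 * 1000;

    static long backoffMillis(Function<Duration, Duration> retryTimeout, int attempt, int responseCode) {
        // Randomized hint grows with the attempt number, shaped by the configured retry timeout.
        long waitMs = retryTimeout.apply(Duration.ofSeconds(RANDOM.nextInt(attempt))).toMillis();
        // 410 means IMDS is upgrading: never wait less than 70 seconds.
        return (responseCode == 410 && waitMs < IMDS_UPGRADE_TIME_IN_MS) ? IMDS_UPGRADE_TIME_IN_MS : waitMs;
    }

    public static void main(String[] args) {
        // e.g. an options-style retry timeout that doubles the hinted duration
        Function<Duration, Duration> retryTimeout = d -> d.multipliedBy(2);
        System.out.println(backoffMillis(retryTimeout, 3, 410)); // >= 70000
    }
}
```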
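
Before entering that loop, `checkIMDSAvailable` above probes the endpoint with a 500 ms connect timeout so that `ManagedIdentityCredential` can fail fast when no IMDS is reachable. A sketch of the probe:

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

final class ImdsProbeSketch {
    static boolean probe(String endpoint) {
        HttpURLConnection connection = null;
        try {
            connection = (HttpURLConnection) new URL(endpoint + "?api-version=2018-02-01").openConnection();
            connection.setRequestMethod("GET");
            connection.setConnectTimeout(500); // fail fast when IMDS is unreachable
            connection.connect();
            return true;
        } catch (IOException e) {
            return false; // the SDK surfaces this as CredentialUnavailableException
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    }
}
```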
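
`getCertificateBytes` above drains the in-memory certificate stream with a classic read-fully loop when no `certificatePath` is configured. The same loop, isolated:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

final class ReadAllBytesSketch {
    static byte[] readAll(InputStream in) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int read;
        while ((read = in.read(buffer, 0, buffer.length)) != -1) { // -1 signals end of stream
            out.write(buffer, 0, read);
        }
        return out.toByteArray();
    }
}
```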
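
`authenticateWithSharedTokenCache` above ultimately delegates to `authenticateWithPublicClientCache`, shown in the continuation below, which only serves a silently acquired token while more than `REFRESH_OFFSET` (five minutes) of validity remains; otherwise a forced refresh is attempted. The freshness test, isolated:

```java
import java.time.Duration;
import java.time.OffsetDateTime;

final class RefreshOffsetSketch {
    private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5);

    static boolean isFreshEnough(OffsetDateTime expiresAt) {
        // Serve from cache only while the token has more than REFRESH_OFFSET of life left.
        return OffsetDateTime.now().isBefore(expiresAt.minus(REFRESH_OFFSET));
    }

    public static void main(String[] args) {
        System.out.println(isFreshEnough(OffsetDateTime.now().plusMinutes(10))); // true
        System.out.println(isFreshEnough(OffsetDateTime.now().plusMinutes(2)));  // false -> force refresh
    }
}
```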
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. 
It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. 
* * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. 
" + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. * * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
Long term we should look into replacing simple String.format calls with plain string concatenation. In most cases concatenation is not only easier to read, it also performs better, since String.format has to parse the format string at runtime to determine the replacement locations and how each argument should be handled.
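A minimal, self-contained illustration of the two forms discussed above (the endpoint and payload values here are illustrative placeholders, not taken from this record):

```java
// Illustrative values only; any endpoint/query pair works the same way.
String endpoint = "http://169.254.169.254/metadata/identity/oauth2/token";
String payload = "api-version=2018-02-01&resource=...";

// String.format must parse "%s?%s" at runtime to locate the insertion points.
String viaFormat = String.format("%s?%s", endpoint, payload);

// Concatenation needs no format-string parsing; javac compiles it to a
// StringBuilder chain (or an invokedynamic string-concat call on Java 9+).
String viaConcat = endpoint + "?" + payload;

assert viaFormat.equals(viaConcat);
```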
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); }
url = new URL(String.format("%s?%s", endpoint, payload));
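Applied to the target line above, the suggested concatenation form would look like the following sketch; `endpoint` and `payload` are the locals from the surrounding method, and `payload` (a StringBuilder) is converted via its implicit toString():

```suggestion
                    url = new URL(endpoint + "?" + payload);
```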
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. " + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get 
accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity 
Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. " + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get 
accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity 
Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } } }
Use a `BufferedInputStream` to prevent reading the file byte-by-byte, which has much higher overhead.
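For context, the overhead the reviewer describes comes from each `read()` on a raw `FileInputStream` translating into a separate OS-level read, while `BufferedInputStream` fills an in-memory buffer (8 KiB by default) and serves most `read()` calls from it. The following is a minimal illustrative sketch, not part of this dataset row; the class name and the fallback file path are placeholder assumptions.

```java
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class BufferedReadDemo {

    public static void main(String[] args) throws IOException {
        // Hypothetical input file; pass any reasonably large file as the first argument.
        String path = args.length > 0 ? args[0] : "certificate.pfx";

        // Every read() on a raw FileInputStream reaches the OS, one byte at a time.
        long unbufferedMs = timeByteReads(new FileInputStream(path));

        // BufferedInputStream reads ahead in chunks and serves read() from memory.
        long bufferedMs = timeByteReads(new BufferedInputStream(new FileInputStream(path)));

        System.out.printf("unbuffered: %d ms, buffered: %d ms%n", unbufferedMs, bufferedMs);
    }

    private static long timeByteReads(InputStream in) throws IOException {
        long start = System.nanoTime();
        try (InputStream stream = in) {
            while (stream.read() != -1) {
                // Consume the stream byte-by-byte to exercise the per-read cost.
            }
        }
        return (System.nanoTime() - start) / 1_000_000;
    }
}
```

On even moderately sized files the buffered variant is typically faster by an order of magnitude or more, which is why wrapping the `FileInputStream` is the suggested change here.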
private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new FileInputStream(certificatePath); } else { return certificate; } }
return new FileInputStream(certificatePath);
private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. " + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get 
accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity 
Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. " + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get 
accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity 
Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } }
If certificate is null we return null anyway, so just return certificate no matter what (see the sketch after the method fields below)
private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } }
return certificate;
private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return certificate; } }
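A minimal sketch of the shape the reviewer's comment is suggesting, under the assumption that `certificatePath` and `certificate` behave like the fields of the surrounding `IdentityClient` (the wrapper class and constructor here are hypothetical stand-ins, added only to make the snippet self-contained):

```java
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

class CertificateSource {
    private final String certificatePath;   // hypothetical stand-in for IdentityClient.certificatePath
    private final InputStream certificate;  // hypothetical stand-in for IdentityClient.certificate

    CertificateSource(String certificatePath, InputStream certificate) {
        this.certificatePath = certificatePath;
        this.certificate = certificate;
    }

    // Per the review comment: when certificatePath is null, returning
    // `certificate` unconditionally already yields null in the case where
    // no in-memory certificate was provided, so no else branch is needed.
    InputStream getCertificateInputStream() throws IOException {
        if (certificatePath != null) {
            return new BufferedInputStream(new FileInputStream(certificatePath));
        }
        return certificate; // may be null, which is the intended fallback
    }
}
```

The net effect is purely stylistic: both versions return the same values for every combination of inputs, the simplified one just drops a branch that added no information.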
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final Pattern LINUX_MAC_PROCESS_ERROR_MESSAGE = Pattern.compile("(.*)az:(.*)not found"); private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_WINDOWS_PS_EXECUTABLE = "pwsh.exe"; private static final String LEGACY_WINDOWS_PS_EXECUTABLE = "powershell.exe"; private static final String DEFAULT_LINUX_PS_EXECUTABLE = "pwsh"; private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; private static final String MSI_ENDPOINT_VERSION = "2017-09-01"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; private static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); private static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String resourceId; private final String clientSecret; private final String clientAssertionFilePath; private final InputStream certificate; private final String certificatePath; private final Supplier<String> clientAssertionSupplier; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; private final SynchronizedAccessor<String> clientAssertionAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication()); this.clientAssertionAccessor = clientAssertionTimeout == null ? new SynchronizedAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5)) : new SynchronizedAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout); } private Mono<ConfidentialClientApplication> getConfidentialClientApplication() { return Mono.defer(() -> { if (clientId == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication."))); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = getCertificateBytes(); List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(pemCertificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(pemCertificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e))); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { return Mono.error(LOGGER.logExceptionAsWarning(new IllegalStateException(e))); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t))); } } if (options.getRegionalAuthority() != null) { if (options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> confidentialClientApplication) : Mono.just(confidentialClientApplication); }); } private Mono<String> parseClientAssertion() { return Mono.fromCallable(() -> { if (clientAssertionFilePath != null) { byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath)); return new String(encoded, StandardCharsets.UTF_8); } else { throw LOGGER.logExceptionAsError(new IllegalStateException( "Client Assertion File Path is not provided." + " It should be provided to authenticate with client assertion." 
)); } }); } private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential) { return Mono.defer(() -> { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl) .validateAuthority(options.getAuthorityValidation()); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (!options.isCp1Disabled()) { Set<String> set = new HashSet<>(1); set.add("CP1"); publicClientApplicationBuilder.clientCapabilities(set); } return Mono.just(publicClientApplicationBuilder); }).flatMap(builder -> { TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl() .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); return tokenCache != null ? tokenCache.registerCache() .map(ignored -> publicClientApplication) : Mono.just(publicClientApplication); }); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails; try { authDetails = cacheAccessor.getAuthDetailsIfAvailable(); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available.", e))); } if (authDetails == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." 
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."))); } String authType = authDetails.getAuthMethod(); if ("SP".equalsIgnoreCase(authType)) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl).validateAuthority(options.getAuthorityValidation()); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if ("DC".equalsIgnoreCase(authType)) { LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools" + " for IntelliJ Plugin."); if (isADFSTenant()) { LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and" + " the ADFS tenants are not supported via IntelliJ Authentication currently."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. ADFS tenant/authorities are not supported."))); } try { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder = RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); refreshTokenParametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build())) .map(MsalToken::new)); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } } else { LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication" + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one" + " of those schemes from Azure Tools for IntelliJ plugin."); return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE."))); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource "); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(LOGGER.logExceptionAsError(ex)); } azCommand.append(scopes); String tenant = IdentityUtil.resolveTenantId(null, request, options); if (!CoreUtils.isNullOrEmpty(tenant)) { azCommand.append("--tenant ").append(tenant); } AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || LINUX_MAC_PROCESS_ERROR_MESSAGE.matcher(line).matches()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. 
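* <p>Illustrative sketch (mirrors the method body below): the on-behalf-of flow hands MSAL the
* incoming user assertion together with the requested scopes:</p>
* <pre>{@code
* OnBehalfOfParameters parameters = OnBehalfOfParameters
*     .builder(new HashSet<>(request.getScopes()), options.getUserAssertion())
*     .tenant(resolvedTenantId)   // stand-in for IdentityUtil.resolveTenantId(tenantId, request, options)
*     .build();
* }</pre>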
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
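* <p>Illustrative sketch (mirrors the method body below): client-credential authentication only
* needs the requested scopes plus the per-request tenant resolution:</p>
* <pre>{@code
* ClientCredentialParameters parameters = ClientCredentialParameters
*     .builder(new HashSet<>(request.getScopes()))
*     .tenant(resolvedTenantId)   // stand-in for IdentityUtil.resolveTenantId(tenantId, request, options)
*     .build();
* }</pre>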
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
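* <p>Illustrative note (not part of the original source): a silently acquired token is only used
* while it is more than {@code REFRESH_OFFSET} (five minutes) from expiry; otherwise the method
* falls through to a forced refresh. The freshness check is effectively:</p>
* <pre>{@code
* boolean fresh = OffsetDateTime.now()
*     .isBefore(token.getExpiresAt().minus(Duration.ofMinutes(5)));
* }</pre>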
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
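* <p>Illustrative consumer (an assumption, not from the original source) that surfaces the
* device code challenge so the user can complete it in a browser:</p>
* <pre>{@code
* Consumer<DeviceCodeInfo> deviceCodeConsumer = challenge ->
*     System.out.println(challenge.getMessage());   // e.g. "To sign in, use a web browser to open ..."
* }</pre>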
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. " + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
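* <p>Illustrative sketch (values and the {@code client} variable are placeholders, not from the
* original source): the code delivered to the redirect URI is exchanged for tokens:</p>
* <pre>{@code
* URI redirect = new URI("http://localhost:8765");   // must match the app registration's reply URL
* client.authenticateWithAuthorizationCode(request, "<auth-code>", redirect)
*       .subscribe(token -> System.out.println("Expires at " + token.getExpiresAt()));
* }</pre>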
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get 
accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity 
Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
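* <p>Request shape, illustratively (mirrors the method body below; values come from the
* IDENTITY_ENDPOINT / IDENTITY_HEADER environment variables):</p>
* <pre>{@code
* // GET <identityEndpoint>?resource=<resource>&api-version=2019-08-01[&client_id=...][&mi_res_id=...]
* // Headers: X-IDENTITY-HEADER: <identityHeader>, Metadata: true
* }</pre>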
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
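* <p>Request shape, illustratively (the host below is the standard Azure IMDS address; the token
* path comes from {@code IdentityConstants.DEFAULT_IMDS_TOKENPATH}):</p>
* <pre>{@code
* // GET http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=<resource>
* // Header: Metadata: true   (400 -> credential unavailable; 404/410/429/5xx -> retried with backoff)
* }</pre>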
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } }
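// Illustrative usage sketch (not part of the original source; "client" and the scope value are
// assumptions): credential types in azure-identity construct an IdentityClient internally and
// delegate token requests to it, roughly:
//
//   TokenRequestContext request = new TokenRequestContext()
//       .addScopes("https://management.azure.com/.default");
//   client.authenticateWithConfidentialClient(request)
//       .subscribe(token -> System.out.println("Token expires at " + token.getExpiresAt()));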
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(e instanceof CredentialUnavailableException ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e) : LOGGER.logExceptionAsError(e)); } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with Azure Power Shell. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(2); PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows() ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE); PowershellManager legacyPowerShellManager = Platform.isWindows() ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null; List<PowershellManager> powershellManagers = new ArrayList<>(2); powershellManagers.add(defaultPowerShellManager); if (legacyPowerShellManager != null) { powershellManagers.add(legacyPowerShellManager); } return Flux.fromIterable(powershellManagers) .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager) .onErrorResume(t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( "Azure Powershell authentication failed. Error Details: " + t.getMessage() + ". To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: null, t)); } exceptions.add((CredentialUnavailableException) t); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException("Azure PowerShell authentication failed using default" + "powershell(pwsh) with following error: " + current.getMessage() + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)" + " with following error: " + last.getMessage(), last.getCause()); } return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last))); })); } /** * Asynchronously acquire a token from Active Directory with Azure PowerShell. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)) .build())) .map(MsalToken::new)); } private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request, PowershellManager powershellManager) { return powershellManager.initSession() .flatMap(manager -> { String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru"; return manager.runCommand(azAccountsCommand) .flatMap(output -> { if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded " + "because no valid module file")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Az.Account module with version >= 2.2.0 is not installed. It needs to be installed to" + " use Azure PowerShell Credential."))); } LOGGER.verbose("Az.accounts module was found installed."); StringBuilder accessTokenCommand = new StringBuilder("Get-AzAccessToken -ResourceUrl "); accessTokenCommand.append(ScopeUtil.scopesToResource(request.getScopes())); accessTokenCommand.append(" | ConvertTo-Json"); String command = accessTokenCommand.toString(); LOGGER.verbose("Azure Powershell Authentication => Executing the command `%s` in Azure " + "Powershell to retrieve the Access Token.", accessTokenCommand); return manager.runCommand(accessTokenCommand.toString()) .flatMap(out -> { if (out.contains("Run Connect-AzAccount to login")) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Run Connect-AzAccount to login to Azure account in PowerShell."))); } try { LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the " + "received response from Azure Powershell."); Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("Token"); String time = objectMap.get("ExpiresOn"); OffsetDateTime expiresOn = OffsetDateTime.parse(time) .withOffsetSameInstant(ZoneOffset.UTC); return Mono.just(new AccessToken(accessToken, expiresOn)); } catch (IOException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Encountered error when deserializing response from Azure Power Shell.", e))); } }); }); }).doFinally(ignored -> powershellManager.close()); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { ClientCredentialParameters.ClientCredentialParametersBuilder builder = ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); return confidentialClient.acquireToken(builder.build()); } )).map(MsalToken::new); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(customClaimRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return pc.acquireToken(userNamePasswordParametersBuilder.build()); } )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password. To mitigate this issue, please refer to the troubleshooting guidelines " + "here at https: null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to log in to acquire the last token * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); parametersBuilder.forceRefresh(true); } if (account != null) { parametersBuilder = parametersBuilder.account(account); } parametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest .formatAsClaimsRequest(request.getClaims()); forceParametersBuilder.claims(customClaimRequest); } if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } forceParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ @SuppressWarnings("deprecation") public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return pc.acquireToken(parametersBuilder.build()); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported. " + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = null; try { credential = accessor.getCredentials("VS Code Azure", cloud); } catch (CredentialUnavailableException e) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e)); } RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build())) .onErrorResume(t -> { if (t instanceof MsalInteractionRequiredException) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("Failed to acquire token with" + " VS code credential." + " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } return Mono.error(new ClientAuthenticationException("Failed to acquire token with" + " VS code credential", null, t)); }) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(customClaimRequest); } Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build()))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @param redirectUrl the redirect URL to listen on and receive security code * @param loginHint the username suggestion to pre-fill the login page's username/email address field * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl, String loginHint) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(customClaimRequest); } if (loginHint != null) { builder.loginHint(loginHint); } Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build()))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get 
accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache."))); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param identityEndpoint the Identity endpoint to acquire token from * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(ScopeUtil.scopesToResource(request.getScopes()), StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode("2019-11-01", StandardCharsets.UTF_8.name())); URL url = new URL(String.format("%s?%s", identityEndpoint, payload)); String secretKey = null; try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); } catch (IOException e) { if (connection == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize " + "Http URL connection to the endpoint.", null, e)); } int status = connection.getResponseCode(); if (status != 401) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401" + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status), null, e)); } String realm = connection.getHeaderField("WWW-Authenticate"); if (realm == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint", null)); } int separatorIndex = realm.indexOf("="); if (separatorIndex == -1) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value" + " for WWW-Authenticate header in the response from Azure Arc Managed Identity 
Endpoint", null)); } String secretKeyPath = realm.substring(separatorIndex + 1); secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8); } finally { if (connection != null) { connection.disconnect(); } } if (secretKey == null) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value" + " in the response from Azure Arc Managed Identity Endpoint", null)); } try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Authorization", String.format("Basic %s", secretKey)); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = scanner.hasNext() ? scanner.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) { return clientAssertionAccessor.getValue() .flatMap(assertionToken -> Mono.fromCallable(() -> { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; StringBuilder urlParametersBuilder = new StringBuilder(); urlParametersBuilder.append("client_assertion="); urlParametersBuilder.append(assertionToken); urlParametersBuilder.append("&client_assertion_type=urn:ietf:params:oauth:client-assertion-type" + ":jwt-bearer"); urlParametersBuilder.append("&client_id="); urlParametersBuilder.append(clientId); urlParametersBuilder.append("&grant_type=client_credentials"); urlParametersBuilder.append("&scope="); urlParametersBuilder.append(URLEncoder.encode(request.getScopes().get(0), StandardCharsets.UTF_8.name())); String urlParams = urlParametersBuilder.toString(); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = new URL(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } })); } /** * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint, String identityHeader, String thumbprint, TokenRequestContext request) { return Mono.fromCallable(() -> { HttpsURLConnection connection = null; String endpoint = identityEndpoint; String headerValue = identityHeader; String endpointVersion = SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpsURLConnection) url.openConnection(); IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER); connection.setRequestMethod("GET"); if (headerValue != null) { connection.setRequestProperty("Secret", headerValue); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param identityEndpoint the Identity endpoint to acquire token from * @param identityHeader the identity header to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader, TokenRequestContext request) { return Mono.fromCallable(() -> { String endpoint; String headerValue; String endpointVersion; endpoint = identityEndpoint; headerValue = identityHeader; endpointVersion = IDENTITY_ENDPOINT_VERSION; String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); payload.append("&api-version="); payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } try { URL url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (headerValue != null) { if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) { connection.setRequestProperty("X-IDENTITY-HEADER", headerValue); } else { connection.setRequestProperty("Secret", headerValue); } } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); payload.append("&resource="); payload.append(URLEncoder.encode(resource, StandardCharsets.UTF_8.name())); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, StandardCharsets.UTF_8.name())); } if (resourceId != null) { payload.append("&mi_res_id="); payload.append(URLEncoder.encode(resourceId, StandardCharsets.UTF_8.name())); } } catch (IOException exception) { return Mono.error(exception); } String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("") + IdentityConstants.DEFAULT_IMDS_TOKENPATH; return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("%s?%s", endpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established.", null)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw LOGGER.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable(String endpoint) { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", StandardCharsets.UTF_8.name())); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("%s?%s", endpoint, payload)); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the 
configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } /** * Get the configured identity client options. * * @return the client options. */ public IdentityClientOptions getIdentityClientOptions() { return options; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int read = certificate.read(buffer, 0, buffer.length); while (read != -1) { outputStream.write(buffer, 0, read); read = certificate.read(buffer, 0, buffer.length); } return outputStream.toByteArray(); } else { return new byte[0]; } } }
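The `expiresOn` handling in `authenticateWithAzureCli` above turns the CLI's local timestamp into a UTC `OffsetDateTime`. Below is a minimal standalone sketch of that conversion, using a hypothetical timestamp in the shape the code expects (local time with fractional seconds):

```java
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public final class ExpiresOnParsingSketch {
    public static void main(String[] args) {
        String time = "2024-05-01 13:30:45.123456"; // hypothetical "expiresOn" value

        // Drop the fractional seconds, then replace the space with the ISO-8601 'T' separator.
        String timeToSecond = time.substring(0, time.indexOf("."));
        String timeJoinedWithT = String.join("T", timeToSecond.split(" "));

        // The CLI reports local time; attach the system zone, then normalize to UTC.
        OffsetDateTime expiresOn = LocalDateTime
            .parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
            .atZone(ZoneId.systemDefault())
            .toOffsetDateTime()
            .withOffsetSameInstant(ZoneOffset.UTC);

        System.out.println(expiresOn); // e.g. 2024-05-01T10:30:45Z for a UTC+3 system zone
    }
}
```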
Replace the `Pattern` usage with a substring: we already check that the string begins with the value that the regex `replaceFirst` strips, so the replacement is effectively a substring operation (see the sketch after this record).
public JsonNode getDeviceCodeCredentials() throws IOException { if (Platform.isMac()) { KeyChainAccessor accessor = new KeyChainAccessor(null, "ADAuthManager", "cachedAuthResult"); String jsonCred = new String(accessor.read(), StandardCharsets.UTF_8); return DEFAULT_MAPPER.readTree(jsonCred); } else if (Platform.isLinux()) { LinuxKeyRingAccessor accessor = new LinuxKeyRingAccessor( "com.intellij.credentialStore.Credential", "service", "ADAuthManager", "account", "cachedAuthResult"); String jsonCred = new String(accessor.read(), StandardCharsets.UTF_8); if (jsonCred.startsWith("cachedAuthResult@")) { jsonCred = jsonCred.substring("cachedAuthResult@".length()); } return DEFAULT_MAPPER.readTree(jsonCred); } else if (Platform.isWindows()) { return getCredentialFromKdbx(); } else { throw LOGGER.logExceptionAsError(new RuntimeException(String.format("OS %s Platform not supported.", Platform.getOSType()))); } }
jsonCred = jsonCred.substring("cachedAuthResult@".length());
public JsonNode getDeviceCodeCredentials() throws IOException { if (Platform.isMac()) { KeyChainAccessor accessor = new KeyChainAccessor(null, "ADAuthManager", "cachedAuthResult"); String jsonCred = new String(accessor.read(), StandardCharsets.UTF_8); return DEFAULT_MAPPER.readTree(jsonCred); } else if (Platform.isLinux()) { LinuxKeyRingAccessor accessor = new LinuxKeyRingAccessor( "com.intellij.credentialStore.Credential", "service", "ADAuthManager", "account", "cachedAuthResult"); String jsonCred = new String(accessor.read(), StandardCharsets.UTF_8); if (jsonCred.startsWith("cachedAuthResult@")) { jsonCred = jsonCred.substring("cachedAuthResult@".length()); } return DEFAULT_MAPPER.readTree(jsonCred); } else if (Platform.isWindows()) { return getCredentialFromKdbx(); } else { throw LOGGER.logExceptionAsError(new RuntimeException(String.format("OS %s Platform not supported.", Platform.getOSType()))); } }
class IntelliJCacheAccessor { private static final ClientLogger LOGGER = new ClientLogger(IntelliJCacheAccessor.class); private final String keePassDatabasePath; private static final byte[] CRYPTO_KEY = new byte[] {0x50, 0x72, 0x6f, 0x78, 0x79, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x53, 0x65, 0x63}; private static final ObjectMapper DEFAULT_MAPPER = new ObjectMapper(); private static final ObjectMapper DONT_FAIL_ON_UNKNOWN_PROPERTIES_MAPPER = new ObjectMapper() .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); /** * Creates an instance of {@link IntelliJCacheAccessor} * * @param keePassDatabasePath the KeePass database path. */ public IntelliJCacheAccessor(String keePassDatabasePath) { this.keePassDatabasePath = keePassDatabasePath; } private List<String> getAzureToolsForIntelliJPluginConfigPaths() { return Arrays.asList(Paths.get(System.getProperty("user.home"), "AzureToolsForIntelliJ").toString(), Paths.get(System.getProperty("user.home"), ".AzureToolsForIntelliJ").toString()); } /** * Get the Device Code credential details of Azure Tools plugin in the IntelliJ IDE. * * @return the {@link JsonNode} holding the authentication details. * @throws IOException If an I/O error occurs. */ /** * Get the Service Principal credential details of Azure Tools plugin in the IntelliJ IDE. * * @param credFilePath the file path holding authentication details * @return the {@link HashMap} holding auth details. * @throws IOException if an error is countered while reading the credential file. */ public Map<String, String> getIntellijServicePrincipalDetails(String credFilePath) throws IOException { BufferedReader reader = null; HashMap<String, String> servicePrincipalDetails = new HashMap<>(8); try { reader = new BufferedReader(new FileReader(credFilePath)); String line = reader.readLine(); while (line != null) { String[] split = line.split("="); split[1] = split[1].replace("\\", ""); servicePrincipalDetails.put(split[0], split[1]); line = reader.readLine(); } } finally { if (reader != null) { reader.close(); } } return servicePrincipalDetails; } @SuppressWarnings({"rawtypes", "unchecked"}) private JsonNode getCredentialFromKdbx() throws IOException { if (CoreUtils.isNullOrEmpty(keePassDatabasePath)) { throw new CredentialUnavailableException("The KeePass database path is either empty or not configured." + " Please configure it on the builder. 
It is required to use " + "IntelliJ credential on the windows platform."); } String extractedpwd = getKdbxPassword(); SecretKeySpec key = new SecretKeySpec(CRYPTO_KEY, "AES"); String password; byte[] dataToDecrypt = Crypt32Util.cryptUnprotectData(Base64.getDecoder().decode(extractedpwd)); ByteBuffer decryptBuffer = ByteBuffer.wrap(dataToDecrypt); Cipher cipher; try { cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); int ivLen = decryptBuffer.getInt(); cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(dataToDecrypt, decryptBuffer.position(), ivLen)); int dataOffset = decryptBuffer.position() + ivLen; byte[] decrypted = cipher.doFinal(dataToDecrypt, dataOffset, dataToDecrypt.length - dataOffset); password = new String(decrypted, StandardCharsets.UTF_8); } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException | InvalidAlgorithmParameterException | IllegalBlockSizeException | BadPaddingException e) { throw LOGGER.logExceptionAsError(new RuntimeException("Unable to access cache.", e)); } try (InputStream inputStream = new FileInputStream(keePassDatabasePath)) { IntelliJKdbxDatabase kdbxDatabase = IntelliJKdbxDatabase.parse(inputStream, password); String jsonToken = kdbxDatabase.getDatabaseEntryValue("ADAuthManager"); if (CoreUtils.isNullOrEmpty(jsonToken)) { throw new CredentialUnavailableException("No credentials found in the cache." + " Please login with IntelliJ Azure Tools plugin in the IDE."); } return DEFAULT_MAPPER.readTree(jsonToken); } catch (Exception e) { throw LOGGER.logExceptionAsError(new RuntimeException("Failed to read KeePass database.", e)); } } private String getKdbxPassword() throws IOException { String passwordFilePath = new File(keePassDatabasePath).getParent() + File.separator + "c.pwd"; String extractedpwd = ""; try (BufferedReader reader = new BufferedReader(new FileReader(passwordFilePath))) { String line = reader.readLine(); while (line != null) { if (line.contains("value")) { String[] tokens = line.split(" "); if (tokens.length == 3) { extractedpwd = tokens[2]; break; } else { throw LOGGER.logExceptionAsError(new RuntimeException("Password not found in the file.")); } } line = reader.readLine(); } } return extractedpwd; } /** * Get the auth host of the specified {@code azureEnvironment}. * @param azureEnvironment the specified Azure Environment * @return the auth host. */ public String getAzureAuthHost(String azureEnvironment) { switch (azureEnvironment) { case "GLOBAL": return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD; case "CHINA": return AzureAuthorityHosts.AZURE_CHINA; case "GERMAN": return AzureAuthorityHosts.AZURE_GERMANY; case "US_GOVERNMENT": return AzureAuthorityHosts.AZURE_GOVERNMENT; default: return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD; } } /** * Parse the auth details of the specified file. * @param file the file input; * @return the parsed {@link IntelliJAuthMethodDetails} from the file input. * @throws IOException when invalid file path is specified. */ public IntelliJAuthMethodDetails parseAuthMethodDetails(File file) throws IOException { return DONT_FAIL_ON_UNKNOWN_PROPERTIES_MAPPER.readValue(file, IntelliJAuthMethodDetails.class); } /** * Get the current authentication method details of Azure Tools plugin in IntelliJ IDE. * * @return the {@link IntelliJAuthMethodDetails} * @throws IOException if an error is encountered while reading the auth details file. 
*/ public IntelliJAuthMethodDetails getAuthDetailsIfAvailable() throws IOException { File authFile = null; for (String metadataPath : getAzureToolsForIntelliJPluginConfigPaths()) { String authMethodDetailsPath = Paths.get(metadataPath, "AuthMethodDetails.json").toString(); authFile = new File(authMethodDetailsPath); if (authFile.exists()) { break; } } if (authFile == null || !authFile.exists()) { return null; } IntelliJAuthMethodDetails authMethodDetails = parseAuthMethodDetails(authFile); String authType = authMethodDetails.getAuthMethod(); if (CoreUtils.isNullOrEmpty(authType)) { return null; } if ("SP".equalsIgnoreCase(authType)) { if (CoreUtils.isNullOrEmpty(authMethodDetails.getCredFilePath())) { return null; } } else if ("DC".equalsIgnoreCase(authType)) { if (CoreUtils.isNullOrEmpty(authMethodDetails.getAccountEmail())) { return null; } } return authMethodDetails; } }
class IntelliJCacheAccessor { private static final ClientLogger LOGGER = new ClientLogger(IntelliJCacheAccessor.class); private final String keePassDatabasePath; private static final byte[] CRYPTO_KEY = new byte[] {0x50, 0x72, 0x6f, 0x78, 0x79, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x53, 0x65, 0x63}; private static final ObjectMapper DEFAULT_MAPPER = new ObjectMapper(); private static final ObjectMapper DONT_FAIL_ON_UNKNOWN_PROPERTIES_MAPPER = new ObjectMapper() .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); /** * Creates an instance of {@link IntelliJCacheAccessor} * * @param keePassDatabasePath the KeePass database path. */ public IntelliJCacheAccessor(String keePassDatabasePath) { this.keePassDatabasePath = keePassDatabasePath; } private List<String> getAzureToolsForIntelliJPluginConfigPaths() { return Arrays.asList(Paths.get(System.getProperty("user.home"), "AzureToolsForIntelliJ").toString(), Paths.get(System.getProperty("user.home"), ".AzureToolsForIntelliJ").toString()); } /** * Get the Device Code credential details of Azure Tools plugin in the IntelliJ IDE. * * @return the {@link JsonNode} holding the authentication details. * @throws IOException If an I/O error occurs. */ /** * Get the Service Principal credential details of Azure Tools plugin in the IntelliJ IDE. * * @param credFilePath the file path holding authentication details * @return the {@link HashMap} holding auth details. * @throws IOException if an error is countered while reading the credential file. */ public Map<String, String> getIntellijServicePrincipalDetails(String credFilePath) throws IOException { BufferedReader reader = null; HashMap<String, String> servicePrincipalDetails = new HashMap<>(8); try { reader = new BufferedReader(new FileReader(credFilePath)); String line = reader.readLine(); while (line != null) { String[] split = line.split("="); split[1] = split[1].replace("\\", ""); servicePrincipalDetails.put(split[0], split[1]); line = reader.readLine(); } } finally { if (reader != null) { reader.close(); } } return servicePrincipalDetails; } @SuppressWarnings({"rawtypes", "unchecked"}) private JsonNode getCredentialFromKdbx() throws IOException { if (CoreUtils.isNullOrEmpty(keePassDatabasePath)) { throw new CredentialUnavailableException("The KeePass database path is either empty or not configured." + " Please configure it on the builder. 
It is required to use " + "IntelliJ credential on the windows platform."); } String extractedpwd = getKdbxPassword(); SecretKeySpec key = new SecretKeySpec(CRYPTO_KEY, "AES"); String password; byte[] dataToDecrypt = Crypt32Util.cryptUnprotectData(Base64.getDecoder().decode(extractedpwd)); ByteBuffer decryptBuffer = ByteBuffer.wrap(dataToDecrypt); Cipher cipher; try { cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); int ivLen = decryptBuffer.getInt(); cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(dataToDecrypt, decryptBuffer.position(), ivLen)); int dataOffset = decryptBuffer.position() + ivLen; byte[] decrypted = cipher.doFinal(dataToDecrypt, dataOffset, dataToDecrypt.length - dataOffset); password = new String(decrypted, StandardCharsets.UTF_8); } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException | InvalidAlgorithmParameterException | IllegalBlockSizeException | BadPaddingException e) { throw LOGGER.logExceptionAsError(new RuntimeException("Unable to access cache.", e)); } try (InputStream inputStream = new FileInputStream(keePassDatabasePath)) { IntelliJKdbxDatabase kdbxDatabase = IntelliJKdbxDatabase.parse(inputStream, password); String jsonToken = kdbxDatabase.getDatabaseEntryValue("ADAuthManager"); if (CoreUtils.isNullOrEmpty(jsonToken)) { throw new CredentialUnavailableException("No credentials found in the cache." + " Please login with IntelliJ Azure Tools plugin in the IDE."); } return DEFAULT_MAPPER.readTree(jsonToken); } catch (Exception e) { throw LOGGER.logExceptionAsError(new RuntimeException("Failed to read KeePass database.", e)); } } private String getKdbxPassword() throws IOException { String passwordFilePath = new File(keePassDatabasePath).getParent() + File.separator + "c.pwd"; String extractedpwd = ""; try (BufferedReader reader = new BufferedReader(new FileReader(passwordFilePath))) { String line = reader.readLine(); while (line != null) { if (line.contains("value")) { String[] tokens = line.split(" "); if (tokens.length == 3) { extractedpwd = tokens[2]; break; } else { throw LOGGER.logExceptionAsError(new RuntimeException("Password not found in the file.")); } } line = reader.readLine(); } } return extractedpwd; } /** * Get the auth host of the specified {@code azureEnvironment}. * @param azureEnvironment the specified Azure Environment * @return the auth host. */ public String getAzureAuthHost(String azureEnvironment) { switch (azureEnvironment) { case "GLOBAL": return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD; case "CHINA": return AzureAuthorityHosts.AZURE_CHINA; case "GERMAN": return AzureAuthorityHosts.AZURE_GERMANY; case "US_GOVERNMENT": return AzureAuthorityHosts.AZURE_GOVERNMENT; default: return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD; } } /** * Parse the auth details of the specified file. * @param file the file input; * @return the parsed {@link IntelliJAuthMethodDetails} from the file input. * @throws IOException when invalid file path is specified. */ public IntelliJAuthMethodDetails parseAuthMethodDetails(File file) throws IOException { return DONT_FAIL_ON_UNKNOWN_PROPERTIES_MAPPER.readValue(file, IntelliJAuthMethodDetails.class); } /** * Get the current authentication method details of Azure Tools plugin in IntelliJ IDE. * * @return the {@link IntelliJAuthMethodDetails} * @throws IOException if an error is encountered while reading the auth details file. 
*/ public IntelliJAuthMethodDetails getAuthDetailsIfAvailable() throws IOException { File authFile = null; for (String metadataPath : getAzureToolsForIntelliJPluginConfigPaths()) { String authMethodDetailsPath = Paths.get(metadataPath, "AuthMethodDetails.json").toString(); authFile = new File(authMethodDetailsPath); if (authFile.exists()) { break; } } if (authFile == null || !authFile.exists()) { return null; } IntelliJAuthMethodDetails authMethodDetails = parseAuthMethodDetails(authFile); String authType = authMethodDetails.getAuthMethod(); if (CoreUtils.isNullOrEmpty(authType)) { return null; } if ("SP".equalsIgnoreCase(authType)) { if (CoreUtils.isNullOrEmpty(authMethodDetails.getCredFilePath())) { return null; } } else if ("DC".equalsIgnoreCase(authType)) { if (CoreUtils.isNullOrEmpty(authMethodDetails.getAccountEmail())) { return null; } } return authMethodDetails; } }
Unnecessary array creation to pass into a varargs API
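A minimal, hypothetical sketch of the inspection's point (the `run` helper below is not from this codebase; it stands in for `ProcessBuilder`'s `(String... command)` overload): a varargs parameter already accepts the elements directly, so wrapping them in `new String[] {...}` only adds an allocation and noise.

```java
public class VarargsDemo {
    // Hypothetical varargs helper standing in for a varargs API such as
    // ProcessBuilder(String... command).
    static String run(String... command) {
        return String.join(" ", command);
    }

    public static void main(String[] args) {
        // Redundant: explicit array creation for a varargs parameter.
        String a = run(new String[] {"pwsh", "-nologo", "-noexit"});
        // Equivalent and cleaner: pass the elements directly.
        String b = run("pwsh", "-nologo", "-noexit");
        System.out.println(a.equals(b)); // true
    }
}
```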
public Mono<PowershellManager> initSession() { ProcessBuilder pb; if (Platform.isWindows()) { pb = new ProcessBuilder("cmd.exe", "/c", "chcp", "65001", ">", "NUL", "&", powershellPath, "-ExecutionPolicy", "Bypass", "-NoExit", "-NoProfile", "-Command", "-"); } else { pb = new ProcessBuilder(powershellPath, "-nologo", "-noexit", "-Command", "-"); } pb.redirectErrorStream(true); Supplier<PowershellManager> supplier = () -> { try { this.process = pb.start(); this.commandWriter = new PrintWriter( new OutputStreamWriter(new BufferedOutputStream(process.getOutputStream()), StandardCharsets.UTF_8), true); if (this.process.waitFor(4L, TimeUnit.SECONDS) && !this.process.isAlive()) { throw new CredentialUnavailableException("Unable to execute PowerShell." + " Please make sure that it is installed in your system."); } this.closed = false; } catch (InterruptedException | IOException e) { throw new CredentialUnavailableException("Unable to execute PowerShell. " + "Please make sure that it is installed in your system", e); } return this; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); }
pb = new ProcessBuilder(powershellPath, "-nologo", "-noexit", "-Command", "-");
public Mono<PowershellManager> initSession() { ProcessBuilder pb; if (Platform.isWindows()) { pb = new ProcessBuilder("cmd.exe", "/c", "chcp", "65001", ">", "NUL", "&", powershellPath, "-ExecutionPolicy", "Bypass", "-NoExit", "-NoProfile", "-Command", "-"); } else { pb = new ProcessBuilder(powershellPath, "-nologo", "-noexit", "-Command", "-"); } pb.redirectErrorStream(true); Supplier<PowershellManager> supplier = () -> { try { this.process = pb.start(); this.commandWriter = new PrintWriter( new OutputStreamWriter(new BufferedOutputStream(process.getOutputStream()), StandardCharsets.UTF_8), true); if (this.process.waitFor(4L, TimeUnit.SECONDS) && !this.process.isAlive()) { throw new CredentialUnavailableException("Unable to execute PowerShell." + " Please make sure that it is installed in your system."); } this.closed = false; } catch (InterruptedException | IOException e) { throw new CredentialUnavailableException("Unable to execute PowerShell. " + "Please make sure that it is installed in your system", e); } return this; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); }
class PowershellManager { private static final ClientLogger LOGGER = new ClientLogger(PowershellManager.class); public static final Pattern PS_RESPONSE_PATTERN = Pattern.compile("\\s+$"); private Process process; private PrintWriter commandWriter; private boolean closed; private int waitPause = 1000; private long maxWait = 10000L; private final String powershellPath; private ExecutorService executorService; public PowershellManager(String powershellPath) { this.powershellPath = powershellPath; } public PowershellManager(String powershellPath, ExecutorService executorService) { this.powershellPath = powershellPath; this.executorService = executorService; } public Mono<String> runCommand(String command) { BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8)); StringBuilder powerShellOutput = new StringBuilder(); commandWriter.println(command); return canRead(reader) .flatMap(b -> { if (b) { return readData(reader, powerShellOutput) .flatMap(ignored -> Mono.just(PS_RESPONSE_PATTERN.matcher(powerShellOutput.toString()) .replaceAll(""))); } else { return Mono.error(new CredentialUnavailableException("Error reading data from reader")); } }); } private Mono<Boolean> readData(BufferedReader reader, StringBuilder powerShellOutput) { return Mono.defer(() -> { String line; try { line = reader.readLine(); if (line != null) { powerShellOutput.append(line).append("\r\n"); return canRead(reader).flatMap(b -> { if (!this.closed && b) { return Mono.empty(); } return Mono.just(true); }); } else { return Mono.just(true); } } catch (IOException e) { return Mono.error( new CredentialUnavailableException("Powershell reader not ready for reading", e)); } }).repeatWhenEmpty((Flux<Long> longFlux) -> longFlux.concatMap(ignored -> Flux.just(true))); } private Mono<Boolean> canRead(BufferedReader reader) { Supplier<Boolean> supplier = () -> { int pause = 62; int maxPause = Platform.isMac() ? this.waitPause : 500; while (true) { try { if (!reader.ready()) { if (pause > maxPause) { return false; } pause *= 2; Thread.sleep((long) pause); } else { break; } } catch (IOException | InterruptedException e) { throw new CredentialUnavailableException("Powershell reader not ready for reading", e); } } return true; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); } public Mono<Boolean> close() { if (!this.closed && this.process != null) { Supplier<Boolean> supplier = () -> { this.commandWriter.println("exit"); try { this.process.waitFor(maxWait, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOGGER.logExceptionAsError(new RuntimeException("PowerShell process encountered unexpected" + " error when closing.", e)); } finally { this.commandWriter.close(); try { if (process.isAlive()) { process.getInputStream().close(); } } catch (IOException ex) { LOGGER.logExceptionAsError(new RuntimeException("PowerShell stream encountered unexpected" + " error when closing.", ex)); } this.closed = true; } return this.closed; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); } else { return Mono.just(true); } } }
class PowershellManager { private static final ClientLogger LOGGER = new ClientLogger(PowershellManager.class); public static final Pattern PS_RESPONSE_PATTERN = Pattern.compile("\\s+$"); private Process process; private PrintWriter commandWriter; private boolean closed; private int waitPause = 1000; private long maxWait = 10000L; private final String powershellPath; private ExecutorService executorService; public PowershellManager(String powershellPath) { this.powershellPath = powershellPath; } public PowershellManager(String powershellPath, ExecutorService executorService) { this.powershellPath = powershellPath; this.executorService = executorService; } public Mono<String> runCommand(String command) { BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8)); StringBuilder powerShellOutput = new StringBuilder(); commandWriter.println(command); return canRead(reader) .flatMap(b -> { if (b) { return readData(reader, powerShellOutput) .flatMap(ignored -> Mono.just(PS_RESPONSE_PATTERN.matcher(powerShellOutput.toString()) .replaceAll(""))); } else { return Mono.error(new CredentialUnavailableException("Error reading data from reader")); } }); } private Mono<Boolean> readData(BufferedReader reader, StringBuilder powerShellOutput) { return Mono.defer(() -> { String line; try { line = reader.readLine(); if (line != null) { powerShellOutput.append(line).append("\r\n"); return canRead(reader).flatMap(b -> { if (!this.closed && b) { return Mono.empty(); } return Mono.just(true); }); } else { return Mono.just(true); } } catch (IOException e) { return Mono.error( new CredentialUnavailableException("Powershell reader not ready for reading", e)); } }).repeatWhenEmpty((Flux<Long> longFlux) -> longFlux.concatMap(ignored -> Flux.just(true))); } private Mono<Boolean> canRead(BufferedReader reader) { Supplier<Boolean> supplier = () -> { int pause = 62; int maxPause = Platform.isMac() ? this.waitPause : 500; while (true) { try { if (!reader.ready()) { if (pause > maxPause) { return false; } pause *= 2; Thread.sleep((long) pause); } else { break; } } catch (IOException | InterruptedException e) { throw new CredentialUnavailableException("Powershell reader not ready for reading", e); } } return true; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); } public Mono<Boolean> close() { if (!this.closed && this.process != null) { Supplier<Boolean> supplier = () -> { this.commandWriter.println("exit"); try { this.process.waitFor(maxWait, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOGGER.logExceptionAsError(new RuntimeException("PowerShell process encountered unexpected" + " error when closing.", e)); } finally { this.commandWriter.close(); try { if (process.isAlive()) { process.getInputStream().close(); } } catch (IOException ex) { LOGGER.logExceptionAsError(new RuntimeException("PowerShell stream encountered unexpected" + " error when closing.", ex)); } this.closed = true; } return this.closed; }; return executorService != null ? Mono.fromFuture(CompletableFuture.supplyAsync(supplier, executorService)) : Mono.fromFuture(CompletableFuture.supplyAsync(supplier)); } else { return Mono.just(true); } } }
Use `StringBuilder` instead of `StringBuffer`: `StringBuffer` takes a lock on every operation, and this builder is already used in a thread-safe fashion (a local variable confined to a single thread), so the locking is pure overhead.
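A minimal standalone sketch of the difference (not from this codebase): `StringBuffer` synchronizes every call, while `StringBuilder` exposes the same API without locking, which is all a thread-confined local needs.

```java
public class BuilderVsBuffer {
    public static void main(String[] args) {
        // StringBuffer acquires a monitor on every append -- wasted work
        // when the instance never leaves this thread.
        StringBuffer buffer = new StringBuffer();
        buffer.append("ab").append("cd");

        // StringBuilder: same API, no synchronization.
        StringBuilder builder = new StringBuilder();
        builder.append("ab").append("cd");

        System.out.println(buffer.toString().equals(builder.toString())); // true
    }
}
```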
private static String extractCertificateThumbprint(Certificate certificate, ClientLogger logger) { try { StringBuilder thumbprint = new StringBuilder(); MessageDigest messageDigest; messageDigest = MessageDigest.getInstance("SHA-1"); byte[] encodedCertificate; try { encodedCertificate = certificate.getEncoded(); } catch (CertificateEncodingException e) { throw new RuntimeException(e); } byte[] updatedDigest = messageDigest.digest(encodedCertificate); for (byte b : updatedDigest) { int unsignedByte = b & 0xff; if (unsignedByte < 16) { thumbprint.append("0"); } thumbprint.append(Integer.toHexString(unsignedByte)); } return thumbprint.toString(); } catch (NoSuchAlgorithmException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } }
StringBuilder thumbprint = new StringBuilder();
private static String extractCertificateThumbprint(Certificate certificate, ClientLogger logger) { try { StringBuilder thumbprint = new StringBuilder(); MessageDigest messageDigest; messageDigest = MessageDigest.getInstance("SHA-1"); byte[] encodedCertificate; try { encodedCertificate = certificate.getEncoded(); } catch (CertificateEncodingException e) { throw new RuntimeException(e); } byte[] updatedDigest = messageDigest.digest(encodedCertificate); for (byte b : updatedDigest) { int unsignedByte = b & 0xff; if (unsignedByte < 16) { thumbprint.append("0"); } thumbprint.append(Integer.toHexString(unsignedByte)); } return thumbprint.toString(); } catch (NoSuchAlgorithmException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } }
class IdentitySslUtil { public static final HostnameVerifier ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER; static { ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER = new HostnameVerifier() { @SuppressWarnings("BadHostnameVerifier") @Override public boolean verify(String hostname, SSLSession session) { return true; } }; } private IdentitySslUtil() { } /** * * Pins the specified HTTPS URL Connection to work against a specific server-side certificate with * the specified thumbprint only. * * @param httpsUrlConnection The https url connection to configure * @param certificateThumbprint The thumbprint of the certificate * @param logger The {@link ClientLogger} used to log any errors that occur in this method call. */ public static void addTrustedCertificateThumbprint(HttpsURLConnection httpsUrlConnection, String certificateThumbprint, ClientLogger logger) { if (httpsUrlConnection.getHostnameVerifier() != ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER) { httpsUrlConnection.setHostnameVerifier(ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER); } TrustManager[] certificateTrust = new TrustManager[]{new X509TrustManager() { public X509Certificate[] getAcceptedIssuers() { return new X509Certificate[]{}; } public void checkClientTrusted(X509Certificate[] certificates, String authenticationType) throws CertificateException { throw logger.logExceptionAsError(new RuntimeException("No client side certificate configured.")); } public void checkServerTrusted(X509Certificate[] certificates, String authenticationType) throws CertificateException { if (certificates == null || certificates.length == 0) { throw logger.logExceptionAsError( new RuntimeException("Did not receive any certificate from the server.")); } for (X509Certificate x509Certificate : certificates) { String sslCertificateThumbprint = extractCertificateThumbprint(x509Certificate, logger); if (certificateThumbprint.equalsIgnoreCase(sslCertificateThumbprint)) { return; } } throw logger.logExceptionAsError(new RuntimeException( "Thumbprint of certificates received did not match the expected thumbprint.")); } } }; SSLSocketFactory sslSocketFactory; try { SSLContext sslContext = SSLContext.getInstance("TLS"); sslContext.init(null, certificateTrust, null); sslSocketFactory = sslContext.getSocketFactory(); } catch (NoSuchAlgorithmException | KeyManagementException e) { throw logger.logExceptionAsError(new RuntimeException("Error Creating SSL Context", e)); } if (httpsUrlConnection.getSSLSocketFactory() != sslSocketFactory) { httpsUrlConnection.setSSLSocketFactory(sslSocketFactory); } } }
class IdentitySslUtil { public static final HostnameVerifier ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER; static { ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER = new HostnameVerifier() { @SuppressWarnings("BadHostnameVerifier") @Override public boolean verify(String hostname, SSLSession session) { return true; } }; } private IdentitySslUtil() { } /** * * Pins the specified HTTPS URL Connection to work against a specific server-side certificate with * the specified thumbprint only. * * @param httpsUrlConnection The https url connection to configure * @param certificateThumbprint The thumbprint of the certificate * @param logger The {@link ClientLogger} used to log any errors that occur in this method call. */ public static void addTrustedCertificateThumbprint(HttpsURLConnection httpsUrlConnection, String certificateThumbprint, ClientLogger logger) { if (httpsUrlConnection.getHostnameVerifier() != ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER) { httpsUrlConnection.setHostnameVerifier(ALL_HOSTS_ACCEPT_HOSTNAME_VERIFIER); } TrustManager[] certificateTrust = new TrustManager[]{new X509TrustManager() { public X509Certificate[] getAcceptedIssuers() { return new X509Certificate[]{}; } public void checkClientTrusted(X509Certificate[] certificates, String authenticationType) throws CertificateException { throw logger.logExceptionAsError(new RuntimeException("No client side certificate configured.")); } public void checkServerTrusted(X509Certificate[] certificates, String authenticationType) throws CertificateException { if (certificates == null || certificates.length == 0) { throw logger.logExceptionAsError( new RuntimeException("Did not receive any certificate from the server.")); } for (X509Certificate x509Certificate : certificates) { String sslCertificateThumbprint = extractCertificateThumbprint(x509Certificate, logger); if (certificateThumbprint.equalsIgnoreCase(sslCertificateThumbprint)) { return; } } throw logger.logExceptionAsError(new RuntimeException( "Thumbprint of certificates received did not match the expected thumbprint.")); } } }; SSLSocketFactory sslSocketFactory; try { SSLContext sslContext = SSLContext.getInstance("TLS"); sslContext.init(null, certificateTrust, null); sslSocketFactory = sslContext.getSocketFactory(); } catch (NoSuchAlgorithmException | KeyManagementException e) { throw logger.logExceptionAsError(new RuntimeException("Error Creating SSL Context", e)); } if (httpsUrlConnection.getSSLSocketFactory() != sslSocketFactory) { httpsUrlConnection.setSSLSocketFactory(sslSocketFactory); } } }
👍
public AzureApplicationCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; }
this.managedIdentityResourceId = resourceId;
public AzureApplicationCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; }
class AzureApplicationCredentialBuilder extends CredentialBuilderBase<AzureApplicationCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(AzureApplicationCredentialBuilder.class); private String managedIdentityClientId; private String managedIdentityResourceId; /** * Creates an instance of a AzureApplicationCredentialBuilder. */ AzureApplicationCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public AzureApplicationCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * @param clientId the client ID * @return An updated instance of this builder with the managed identity client id set as specified. */ public AzureApplicationCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. * * @param resourceId the resource ID * @return An updated instance of this builder with the managed identity client id set as specified. */ /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. */ public AzureApplicationCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * Creates new {@link AzureApplicationCredential} with the configured options set. * @return a {@link AzureApplicationCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public AzureApplicationCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityClientId and managedIdentityResourceId can be specified.")); } return new AzureApplicationCredential(getCredentialsChain()); } private ArrayList<TokenCredential> getCredentialsChain() { ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(2); output.add(new EnvironmentCredential(identityClientOptions)); output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions)); return output; } }
class AzureApplicationCredentialBuilder extends CredentialBuilderBase<AzureApplicationCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(AzureApplicationCredentialBuilder.class); private String managedIdentityClientId; private String managedIdentityResourceId; /** * Creates an instance of a AzureApplicationCredentialBuilder. */ AzureApplicationCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public AzureApplicationCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * @param clientId the client ID * @return An updated instance of this builder with the managed identity client id set as specified. */ public AzureApplicationCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. * * @param resourceId the resource ID * @return An updated instance of this builder with the managed identity client id set as specified. */ /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. */ public AzureApplicationCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * Creates new {@link AzureApplicationCredential} with the configured options set. * @return a {@link AzureApplicationCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public AzureApplicationCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityClientId and managedIdentityResourceId can be specified.")); } return new AzureApplicationCredential(getCredentialsChain()); } private ArrayList<TokenCredential> getCredentialsChain() { ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(2); output.add(new EnvironmentCredential(identityClientOptions)); output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions)); return output; } }
👍
public Mono<AccessToken> authenticate(TokenRequestContext request) { if (this.getClientId() == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalStateException("The client id is not configured via" + " 'AZURE_CLIENT_ID' environment variable or through the credential builder." + " Please ensure client id is provided to authenticate via token exchange in AKS environment."))); } return identityClient.authenticateWithExchangeToken(request); }
return identityClient.authenticateWithExchangeToken(request);
public Mono<AccessToken> authenticate(TokenRequestContext request) { if (this.getClientId() == null) { return Mono.error(LOGGER.logExceptionAsError(new IllegalStateException("The client id is not configured via" + " 'AZURE_CLIENT_ID' environment variable or through the credential builder." + " Please ensure client id is provided to authenticate via token exchange in AKS environment."))); } return identityClient.authenticateWithExchangeToken(request); }
class AksExchangeTokenCredential extends ManagedIdentityServiceCredential { private static final ClientLogger LOGGER = new ClientLogger(AksExchangeTokenCredential.class); /** * Creates an instance of AksExchangeTokenCredential. * * @param clientId the client id of user assigned or system assigned identity. * @param identityClient the identity client to acquire a token with. */ AksExchangeTokenCredential(String clientId, IdentityClient identityClient) { super(clientId, identityClient, "AZURE AKS TOKEN EXCHANGE"); } @Override }
class AksExchangeTokenCredential extends ManagedIdentityServiceCredential { private static final ClientLogger LOGGER = new ClientLogger(AksExchangeTokenCredential.class); /** * Creates an instance of AksExchangeTokenCredential. * * @param clientId the client id of user assigned or system assigned identity. * @param identityClient the identity client to acquire a token with. */ AksExchangeTokenCredential(String clientId, IdentityClient identityClient) { super(clientId, identityClient, "AZURE AKS TOKEN EXCHANGE"); } @Override }
Does `!isInCreateMode()` mean "is in update mode"? If so, extracting an `isInUpdateMode()` helper would make these checks read more clearly.
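The updated body below answers this by extracting the negation into a named helper; the refactor amounts to the following fragment, repeated at no call site:

```java
// Reads as intent rather than a double negative at each call site.
private boolean isInUpdateMode() {
    return !isInCreateMode();
}
```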
public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) { this.innerModel().withEndOfLifeDate(endOfLifeDate); if (!isInCreateMode()) { this.galleryImageUpdate.withEndOfLifeDate(endOfLifeDate); } return this; }
if (!isInCreateMode()) {
public GalleryImageImpl withEndOfLifeDate(OffsetDateTime endOfLifeDate) { this.innerModel().withEndOfLifeDate(endOfLifeDate); if (isInUpdateMode()) { this.galleryImageUpdate.withEndOfLifeDate(endOfLifeDate); } return this; }
class GalleryImageImpl extends CreatableUpdatableImpl<GalleryImage, GalleryImageInner, GalleryImageImpl> implements GalleryImage, GalleryImage.Definition, GalleryImage.Update { private final ComputeManager manager; private String resourceGroupName; private String galleryName; private String galleryImageName; private GalleryImageUpdate galleryImageUpdate; GalleryImageImpl(String name, ComputeManager manager) { super(name, new GalleryImageInner()); this.manager = manager; this.galleryImageName = name; } GalleryImageImpl(GalleryImageInner inner, ComputeManager manager) { super(inner.name(), inner); this.manager = manager; this.galleryImageName = inner.name(); this.resourceGroupName = getValueFromIdByName(inner.id(), "resourceGroups"); this.galleryName = getValueFromIdByName(inner.id(), "galleries"); this.galleryImageName = getValueFromIdByName(inner.id(), "images"); } @Override public Mono<GalleryImageVersion> getVersionAsync(String versionName) { return this .manager() .galleryImageVersions() .getByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName); } @Override public GalleryImageVersion getVersion(String versionName) { return this .manager() .galleryImageVersions() .getByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName); } @Override public PagedFlux<GalleryImageVersion> listVersionsAsync() { return this .manager() .galleryImageVersions() .listByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName); } @Override public PagedIterable<GalleryImageVersion> listVersions() { return this .manager() .galleryImageVersions() .listByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName); } @Override public ComputeManager manager() { return this.manager; } @Override public Mono<GalleryImage> createResourceAsync() { return manager() .serviceClient() .getGalleryImages() .createOrUpdateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.innerModel()) .map(innerToFluentMap(this)); } @Override public GalleryImageImpl update() { this.galleryImageUpdate = new GalleryImageUpdate(); return super.update(); } @Override public Mono<GalleryImage> updateResourceAsync() { this.galleryImageUpdate .withOsState(innerModel().osState()) .withOsType(innerModel().osType()) .withIdentifier(innerModel().identifier()); return manager() .serviceClient() .getGalleryImages() .updateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.galleryImageUpdate) .map(innerToFluentMap(this)); } @Override protected Mono<GalleryImageInner> getInnerAsync() { return manager() .serviceClient() .getGalleryImages() .getAsync(this.resourceGroupName, this.galleryName, this.galleryImageName); } @Override public boolean isInCreateMode() { return this.innerModel().id() == null; } @Override public String description() { return this.innerModel().description(); } @Override public List<DiskSkuTypes> unsupportedDiskTypes() { if (this.innerModel().disallowed() == null || this.innerModel().disallowed().diskTypes() == null) { return Collections.unmodifiableList(new ArrayList<DiskSkuTypes>()); } else { List<DiskSkuTypes> diskTypes = new ArrayList<DiskSkuTypes>(); for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) { diskTypes.add(DiskSkuTypes.fromStorageAccountType(DiskStorageAccountTypes.fromString(diskTypeStr))); } return Collections.unmodifiableList(diskTypes); } } @Override public Disallowed disallowed() { return this.innerModel().disallowed(); } @Override 
public OffsetDateTime endOfLifeDate() { return this.innerModel().endOfLifeDate(); } @Override public String eula() { return this.innerModel().eula(); } @Override public String id() { return this.innerModel().id(); } @Override public GalleryImageIdentifier identifier() { return this.innerModel().identifier(); } @Override public String location() { return this.innerModel().location(); } @Override public String name() { return this.innerModel().name(); } @Override public OperatingSystemStateTypes osState() { return this.innerModel().osState(); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public String privacyStatementUri() { return this.innerModel().privacyStatementUri(); } @Override public String provisioningState() { return this.innerModel().provisioningState().toString(); } @Override public ImagePurchasePlan purchasePlan() { return this.innerModel().purchasePlan(); } @Override public RecommendedMachineConfiguration recommendedVirtualMachineConfiguration() { return this.innerModel().recommended(); } @Override public String releaseNoteUri() { return this.innerModel().releaseNoteUri(); } @Override public Map<String, String> tags() { return this.innerModel().tags(); } @Override public String type() { return this.innerModel().type(); } @Override public GalleryImageImpl withExistingGallery(String resourceGroupName, String galleryName) { this.resourceGroupName = resourceGroupName; this.galleryName = galleryName; return this; } @Override public GalleryImageImpl withExistingGallery(Gallery gallery) { this.resourceGroupName = gallery.resourceGroupName(); this.galleryName = gallery.name(); return this; } @Override public GalleryImageImpl withLocation(String location) { this.innerModel().withLocation(location); return this; } @Override public GalleryImageImpl withLocation(Region location) { this.innerModel().withLocation(location.toString()); return this; } @Override public GalleryImageImpl withIdentifier(GalleryImageIdentifier identifier) { this.innerModel().withIdentifier(identifier); return this; } @Override public GalleryImageImpl withIdentifier(String publisher, String offer, String sku) { this .innerModel() .withIdentifier(new GalleryImageIdentifier().withPublisher(publisher).withOffer(offer).withSku(sku)); return this; } @Override public GalleryImageImpl withGeneralizedWindows() { return this.withWindows(OperatingSystemStateTypes.GENERALIZED); } @Override public GalleryImageImpl withGeneralizedLinux() { return this.withLinux(OperatingSystemStateTypes.GENERALIZED); } @Override public GalleryImageImpl withWindows(OperatingSystemStateTypes osState) { this.innerModel().withOsType(OperatingSystemTypes.WINDOWS).withOsState(osState); return this; } @Override public GalleryImageImpl withLinux(OperatingSystemStateTypes osState) { this.innerModel().withOsType(OperatingSystemTypes.LINUX).withOsState(osState); return this; } @Override public GalleryImageImpl withDescription(String description) { this.innerModel().withDescription(description); if (!isInCreateMode()) { this.galleryImageUpdate.withDescription(description); } return this; } @Override public GalleryImageImpl withUnsupportedDiskType(DiskSkuTypes diskType) { if (this.innerModel().disallowed() == null) { this.innerModel().withDisallowed(new Disallowed()); } if (this.innerModel().disallowed().diskTypes() == null) { this.innerModel().disallowed().withDiskTypes(new ArrayList<String>()); } boolean found = false; String newDiskTypeStr = diskType.toString(); for (String diskTypeStr : 
this.innerModel().disallowed().diskTypes()) { if (diskTypeStr.equalsIgnoreCase(newDiskTypeStr)) { found = true; break; } } if (!found) { this.innerModel().disallowed().diskTypes().add(diskType.toString()); } if (!isInCreateMode()) { this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed()); } return this; } @Override public GalleryImageImpl withUnsupportedDiskTypes(List<DiskSkuTypes> diskTypes) { if (this.innerModel().disallowed() == null) { this.innerModel().withDisallowed(new Disallowed()); } this.innerModel().disallowed().withDiskTypes(new ArrayList<String>()); for (DiskSkuTypes diskType : diskTypes) { this.innerModel().disallowed().diskTypes().add(diskType.toString()); } if (!isInCreateMode()) { this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed()); } return this; } @Override public GalleryImageImpl withoutUnsupportedDiskType(DiskSkuTypes diskType) { if (this.innerModel().disallowed() != null && this.innerModel().disallowed().diskTypes() != null) { int foundIndex = -1; int i = 0; String diskTypeToRemove = diskType.toString(); for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) { if (diskTypeStr.equalsIgnoreCase(diskTypeToRemove)) { foundIndex = i; break; } i++; } if (foundIndex != -1) { this.innerModel().disallowed().diskTypes().remove(foundIndex); } if (!isInCreateMode()) { this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed()); } } return this; } @Override public GalleryImageImpl withDisallowed(Disallowed disallowed) { this.innerModel().withDisallowed(disallowed); if (!isInCreateMode()) { this.galleryImageUpdate.withDisallowed(disallowed); } return this; } @Override @Override public GalleryImageImpl withEula(String eula) { this.innerModel().withEula(eula); if (!isInCreateMode()) { this.galleryImageUpdate.withEula(eula); } return this; } @Override public GalleryImageImpl withOsState(OperatingSystemStateTypes osState) { this.innerModel().withOsState(osState); if (!isInCreateMode()) { this.galleryImageUpdate.withOsState(osState); } return this; } @Override public GalleryImageImpl withPrivacyStatementUri(String privacyStatementUri) { this.innerModel().withPrivacyStatementUri(privacyStatementUri); if (!isInCreateMode()) { this.galleryImageUpdate.withPrivacyStatementUri(privacyStatementUri); } return this; } @Override public GalleryImageImpl withPurchasePlan(String name, String publisher, String product) { return this .withPurchasePlan(new ImagePurchasePlan().withName(name).withPublisher(publisher).withProduct(product)); } @Override public GalleryImageImpl withPurchasePlan(ImagePurchasePlan purchasePlan) { this.innerModel().withPurchasePlan(purchasePlan); return this; } @Override public GalleryImageImpl withRecommendedMinimumCPUsCountForVirtualMachine(int minCount) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } if (this.innerModel().recommended().vCPUs() == null) { this.innerModel().recommended().withVCPUs(new ResourceRange()); } this.innerModel().recommended().vCPUs().withMin(minCount); if (!isInCreateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedMaximumCPUsCountForVirtualMachine(int maxCount) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } if (this.innerModel().recommended().vCPUs() == null) { this.innerModel().recommended().withVCPUs(new ResourceRange()); } 
this.innerModel().recommended().vCPUs().withMax(maxCount); if (!isInCreateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedCPUsCountForVirtualMachine(int minCount, int maxCount) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } this.innerModel().recommended().withVCPUs(new ResourceRange()); this.innerModel().recommended().vCPUs().withMin(minCount); this.innerModel().recommended().vCPUs().withMax(maxCount); if (!isInCreateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedMinimumMemoryForVirtualMachine(int minMB) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } if (this.innerModel().recommended().memory() == null) { this.innerModel().recommended().withMemory(new ResourceRange()); } this.innerModel().recommended().memory().withMin(minMB); if (!isInCreateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedMaximumMemoryForVirtualMachine(int maxMB) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } if (this.innerModel().recommended().memory() == null) { this.innerModel().recommended().withMemory(new ResourceRange()); } this.innerModel().recommended().memory().withMax(maxMB); if (!isInCreateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedMemoryForVirtualMachine(int minMB, int maxMB) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } this.innerModel().recommended().withMemory(new ResourceRange()); this.innerModel().recommended().memory().withMin(minMB); this.innerModel().recommended().memory().withMax(maxMB); if (!isInCreateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedConfigurationForVirtualMachine( RecommendedMachineConfiguration recommendedConfig) { this.innerModel().withRecommended(recommendedConfig); if (!isInCreateMode()) { this.galleryImageUpdate.withRecommended(recommendedConfig); } return this; } @Override public GalleryImageImpl withReleaseNoteUri(String releaseNoteUri) { this.innerModel().withReleaseNoteUri(releaseNoteUri); if (!isInCreateMode()) { this.galleryImageUpdate.withReleaseNoteUri(releaseNoteUri); } return this; } @Override public GalleryImageImpl withTags(Map<String, String> tags) { this.innerModel().withTags(tags); if (!isInCreateMode()) { this.galleryImageUpdate.withTags(tags); } return this; } private static String getValueFromIdByName(String id, String name) { if (id == null) { return null; } Iterable<String> iterable = Arrays.asList(id.split("/")); Iterator<String> itr = iterable.iterator(); while (itr.hasNext()) { String part = itr.next(); if (part != null && !part.trim().isEmpty()) { if (part.equalsIgnoreCase(name)) { if (itr.hasNext()) { return itr.next(); } else { return null; } } } } return null; } }
class GalleryImageImpl extends CreatableUpdatableImpl<GalleryImage, GalleryImageInner, GalleryImageImpl> implements GalleryImage, GalleryImage.Definition, GalleryImage.Update { private final ComputeManager manager; private String resourceGroupName; private String galleryName; private String galleryImageName; private GalleryImageUpdate galleryImageUpdate; GalleryImageImpl(String name, ComputeManager manager) { super(name, new GalleryImageInner()); this.manager = manager; this.galleryImageName = name; } GalleryImageImpl(GalleryImageInner inner, ComputeManager manager) { super(inner.name(), inner); this.manager = manager; this.galleryImageName = inner.name(); this.resourceGroupName = getValueFromIdByName(inner.id(), "resourceGroups"); this.galleryName = getValueFromIdByName(inner.id(), "galleries"); this.galleryImageName = getValueFromIdByName(inner.id(), "images"); } @Override public Mono<GalleryImageVersion> getVersionAsync(String versionName) { return this .manager() .galleryImageVersions() .getByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName); } @Override public GalleryImageVersion getVersion(String versionName) { return this .manager() .galleryImageVersions() .getByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName, versionName); } @Override public PagedFlux<GalleryImageVersion> listVersionsAsync() { return this .manager() .galleryImageVersions() .listByGalleryImageAsync(this.resourceGroupName, this.galleryName, this.galleryImageName); } @Override public PagedIterable<GalleryImageVersion> listVersions() { return this .manager() .galleryImageVersions() .listByGalleryImage(this.resourceGroupName, this.galleryName, this.galleryImageName); } @Override public ComputeManager manager() { return this.manager; } @Override public Mono<GalleryImage> createResourceAsync() { return manager() .serviceClient() .getGalleryImages() .createOrUpdateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.innerModel()) .map(innerToFluentMap(this)); } @Override public GalleryImageImpl update() { this.galleryImageUpdate = new GalleryImageUpdate(); return super.update(); } @Override public Mono<GalleryImage> updateResourceAsync() { this.galleryImageUpdate .withOsState(innerModel().osState()) .withOsType(innerModel().osType()) .withIdentifier(innerModel().identifier()); return manager() .serviceClient() .getGalleryImages() .updateAsync(this.resourceGroupName, this.galleryName, this.galleryImageName, this.galleryImageUpdate) .map(innerToFluentMap(this)); } @Override protected Mono<GalleryImageInner> getInnerAsync() { return manager() .serviceClient() .getGalleryImages() .getAsync(this.resourceGroupName, this.galleryName, this.galleryImageName); } @Override public boolean isInCreateMode() { return this.innerModel().id() == null; } @Override public String description() { return this.innerModel().description(); } @Override public List<DiskSkuTypes> unsupportedDiskTypes() { if (this.innerModel().disallowed() == null || this.innerModel().disallowed().diskTypes() == null) { return Collections.unmodifiableList(new ArrayList<DiskSkuTypes>()); } else { List<DiskSkuTypes> diskTypes = new ArrayList<DiskSkuTypes>(); for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) { diskTypes.add(DiskSkuTypes.fromStorageAccountType(DiskStorageAccountTypes.fromString(diskTypeStr))); } return Collections.unmodifiableList(diskTypes); } } @Override public Disallowed disallowed() { return this.innerModel().disallowed(); } @Override 
public OffsetDateTime endOfLifeDate() { return this.innerModel().endOfLifeDate(); } @Override public String eula() { return this.innerModel().eula(); } @Override public String id() { return this.innerModel().id(); } @Override public GalleryImageIdentifier identifier() { return this.innerModel().identifier(); } @Override public String location() { return this.innerModel().location(); } @Override public String name() { return this.innerModel().name(); } @Override public OperatingSystemStateTypes osState() { return this.innerModel().osState(); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public String privacyStatementUri() { return this.innerModel().privacyStatementUri(); } @Override public String provisioningState() { return this.innerModel().provisioningState().toString(); } @Override public ImagePurchasePlan purchasePlan() { return this.innerModel().purchasePlan(); } @Override public RecommendedMachineConfiguration recommendedVirtualMachineConfiguration() { return this.innerModel().recommended(); } @Override public String releaseNoteUri() { return this.innerModel().releaseNoteUri(); } @Override public Map<String, String> tags() { return this.innerModel().tags(); } @Override public String type() { return this.innerModel().type(); } @Override public GalleryImageImpl withExistingGallery(String resourceGroupName, String galleryName) { this.resourceGroupName = resourceGroupName; this.galleryName = galleryName; return this; } @Override public GalleryImageImpl withExistingGallery(Gallery gallery) { this.resourceGroupName = gallery.resourceGroupName(); this.galleryName = gallery.name(); return this; } @Override public GalleryImageImpl withLocation(String location) { this.innerModel().withLocation(location); return this; } @Override public GalleryImageImpl withLocation(Region location) { this.innerModel().withLocation(location.toString()); return this; } @Override public GalleryImageImpl withIdentifier(GalleryImageIdentifier identifier) { this.innerModel().withIdentifier(identifier); return this; } @Override public GalleryImageImpl withIdentifier(String publisher, String offer, String sku) { this .innerModel() .withIdentifier(new GalleryImageIdentifier().withPublisher(publisher).withOffer(offer).withSku(sku)); return this; } @Override public GalleryImageImpl withGeneralizedWindows() { return this.withWindows(OperatingSystemStateTypes.GENERALIZED); } @Override public GalleryImageImpl withGeneralizedLinux() { return this.withLinux(OperatingSystemStateTypes.GENERALIZED); } @Override public GalleryImageImpl withWindows(OperatingSystemStateTypes osState) { this.innerModel().withOsType(OperatingSystemTypes.WINDOWS).withOsState(osState); return this; } @Override public GalleryImageImpl withLinux(OperatingSystemStateTypes osState) { this.innerModel().withOsType(OperatingSystemTypes.LINUX).withOsState(osState); return this; } @Override public GalleryImageImpl withDescription(String description) { this.innerModel().withDescription(description); if (isInUpdateMode()) { this.galleryImageUpdate.withDescription(description); } return this; } @Override public GalleryImageImpl withUnsupportedDiskType(DiskSkuTypes diskType) { if (this.innerModel().disallowed() == null) { this.innerModel().withDisallowed(new Disallowed()); } if (this.innerModel().disallowed().diskTypes() == null) { this.innerModel().disallowed().withDiskTypes(new ArrayList<String>()); } boolean found = false; String newDiskTypeStr = diskType.toString(); for (String diskTypeStr : 
this.innerModel().disallowed().diskTypes()) { if (diskTypeStr.equalsIgnoreCase(newDiskTypeStr)) { found = true; break; } } if (!found) { this.innerModel().disallowed().diskTypes().add(diskType.toString()); } if (isInUpdateMode()) { this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed()); } return this; } @Override public GalleryImageImpl withUnsupportedDiskTypes(List<DiskSkuTypes> diskTypes) { if (this.innerModel().disallowed() == null) { this.innerModel().withDisallowed(new Disallowed()); } this.innerModel().disallowed().withDiskTypes(new ArrayList<String>()); for (DiskSkuTypes diskType : diskTypes) { this.innerModel().disallowed().diskTypes().add(diskType.toString()); } if (isInUpdateMode()) { this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed()); } return this; } @Override public GalleryImageImpl withoutUnsupportedDiskType(DiskSkuTypes diskType) { if (this.innerModel().disallowed() != null && this.innerModel().disallowed().diskTypes() != null) { int foundIndex = -1; int i = 0; String diskTypeToRemove = diskType.toString(); for (String diskTypeStr : this.innerModel().disallowed().diskTypes()) { if (diskTypeStr.equalsIgnoreCase(diskTypeToRemove)) { foundIndex = i; break; } i++; } if (foundIndex != -1) { this.innerModel().disallowed().diskTypes().remove(foundIndex); } if (isInUpdateMode()) { this.galleryImageUpdate.withDisallowed(this.innerModel().disallowed()); } } return this; } @Override public GalleryImageImpl withDisallowed(Disallowed disallowed) { this.innerModel().withDisallowed(disallowed); if (isInUpdateMode()) { this.galleryImageUpdate.withDisallowed(disallowed); } return this; } @Override @Override public GalleryImageImpl withEula(String eula) { this.innerModel().withEula(eula); if (isInUpdateMode()) { this.galleryImageUpdate.withEula(eula); } return this; } @Override public GalleryImageImpl withOsState(OperatingSystemStateTypes osState) { this.innerModel().withOsState(osState); if (isInUpdateMode()) { this.galleryImageUpdate.withOsState(osState); } return this; } @Override public GalleryImageImpl withPrivacyStatementUri(String privacyStatementUri) { this.innerModel().withPrivacyStatementUri(privacyStatementUri); if (isInUpdateMode()) { this.galleryImageUpdate.withPrivacyStatementUri(privacyStatementUri); } return this; } @Override public GalleryImageImpl withPurchasePlan(String name, String publisher, String product) { return this .withPurchasePlan(new ImagePurchasePlan().withName(name).withPublisher(publisher).withProduct(product)); } @Override public GalleryImageImpl withPurchasePlan(ImagePurchasePlan purchasePlan) { this.innerModel().withPurchasePlan(purchasePlan); return this; } @Override public GalleryImageImpl withRecommendedMinimumCPUsCountForVirtualMachine(int minCount) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } if (this.innerModel().recommended().vCPUs() == null) { this.innerModel().recommended().withVCPUs(new ResourceRange()); } this.innerModel().recommended().vCPUs().withMin(minCount); if (isInUpdateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedMaximumCPUsCountForVirtualMachine(int maxCount) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } if (this.innerModel().recommended().vCPUs() == null) { this.innerModel().recommended().withVCPUs(new ResourceRange()); } 
this.innerModel().recommended().vCPUs().withMax(maxCount); if (isInUpdateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedCPUsCountForVirtualMachine(int minCount, int maxCount) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } this.innerModel().recommended().withVCPUs(new ResourceRange()); this.innerModel().recommended().vCPUs().withMin(minCount); this.innerModel().recommended().vCPUs().withMax(maxCount); if (isInUpdateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedMinimumMemoryForVirtualMachine(int minMB) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } if (this.innerModel().recommended().memory() == null) { this.innerModel().recommended().withMemory(new ResourceRange()); } this.innerModel().recommended().memory().withMin(minMB); if (isInUpdateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedMaximumMemoryForVirtualMachine(int maxMB) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } if (this.innerModel().recommended().memory() == null) { this.innerModel().recommended().withMemory(new ResourceRange()); } this.innerModel().recommended().memory().withMax(maxMB); if (isInUpdateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedMemoryForVirtualMachine(int minMB, int maxMB) { if (this.innerModel().recommended() == null) { this.innerModel().withRecommended(new RecommendedMachineConfiguration()); } this.innerModel().recommended().withMemory(new ResourceRange()); this.innerModel().recommended().memory().withMin(minMB); this.innerModel().recommended().memory().withMax(maxMB); if (isInUpdateMode()) { this.galleryImageUpdate.withRecommended(this.innerModel().recommended()); } return this; } @Override public GalleryImageImpl withRecommendedConfigurationForVirtualMachine( RecommendedMachineConfiguration recommendedConfig) { this.innerModel().withRecommended(recommendedConfig); if (isInUpdateMode()) { this.galleryImageUpdate.withRecommended(recommendedConfig); } return this; } @Override public GalleryImageImpl withReleaseNoteUri(String releaseNoteUri) { this.innerModel().withReleaseNoteUri(releaseNoteUri); if (isInUpdateMode()) { this.galleryImageUpdate.withReleaseNoteUri(releaseNoteUri); } return this; } @Override public GalleryImageImpl withTags(Map<String, String> tags) { this.innerModel().withTags(tags); if (isInUpdateMode()) { this.galleryImageUpdate.withTags(tags); } return this; } private boolean isInUpdateMode() { return !isInCreateMode(); } private static String getValueFromIdByName(String id, String name) { if (id == null) { return null; } Iterable<String> iterable = Arrays.asList(id.split("/")); Iterator<String> itr = iterable.iterator(); while (itr.hasNext()) { String part = itr.next(); if (part != null && !part.trim().isEmpty()) { if (part.equalsIgnoreCase(name)) { if (itr.hasNext()) { return itr.next(); } else { return null; } } } } return null; } }
If we want to close the CBS channel here, we couldn't keep using `repeat().takeUntilOther(shutdownSignal)` to stop requesting a new CBS channel after the connection closed. Here I try to use `AmqpChannelProcessor.dispose()` to mark the processor as disposed and close its channels. But this needs a double check on why `flatMap(channel -> channel.closeAsync())` was used previously.
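For reference, the two variants under discussion side by side (fragments taken from the before/after bodies below, not standalone code):

```java
final Mono<Void> cbsCloseOperation;
if (cbsChannelProcessor != null) {
    // This change: dispose the processor so it stops requesting a new
    // CBS channel once the connection is shutting down.
    cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
    // Previous code: close only the channel currently cached by the
    // processor, leaving the processor free to request another one.
    // cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync());
} else {
    cbsCloseOperation = Mono.empty();
}
```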
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .log("Closed management nodes."))) .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. "))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose());
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Emitting the shutdown signal triggers closing of the sessions and their senders/receivers. Sequencing it after the CBS and management node close operations ensures the sessions only start closing once those nodes have closed. A minimal sketch of this ordering is shown below.
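A small sketch of the sequencing this comment relies on, using only `reactor-core` (the `closeCbs` / `closeMgmt` / `emitShutdown` monos are hypothetical stand-ins for the real close operations). `Mono.whenDelayError` runs the CBS and management closes together and waits for both even if one errors, and `then()` guarantees the shutdown signal, which tears down the sessions, fires only afterwards:

```java
import reactor.core.publisher.Mono;

public class CloseOrderingSketch {
    public static void main(String[] args) {
        // Stand-ins for closing the CBS node, the management nodes, and
        // emitting the shutdown signal that closes sessions and links.
        Mono<Void> closeCbs = Mono.fromRunnable(() -> System.out.println("CBS node closed"));
        Mono<Void> closeMgmt = Mono.fromRunnable(() -> System.out.println("management nodes closed"));
        Mono<Void> emitShutdown = Mono.fromRunnable(() -> System.out.println("shutdown signal emitted"));

        // Both closes complete (even on error, thanks to whenDelayError)
        // before the shutdown signal is emitted.
        Mono.whenDelayError(closeCbs, closeMgmt)
            .then(emitShutdown)
            .block();
    }
}
```

This mirrors the shape of the updated `closeAsync(AmqpShutdownSignal)`: CBS and management closes first, then the shutdown-signal emission, then the reactor teardown.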
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = Mono.fromRunnable(() -> cbsChannelProcessor.dispose()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .log("Closed management nodes."))) .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. "))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
.then(emitShutDownSignalOperation.doFinally(signalType ->
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
Thanks, Kun. I can clearly see how this change helps us avoid repeated logs in the "explicit close" use case. We could split connection close into two modes: 1. Explicit closeAsync. 2. Implicit closeAsync. "Explicit close" is when the application explicitly invokes `closeAsync()`; at the time of such an explicit close, the underlying AMQP connection is (very likely) healthy, so the endpoint closure (links -> session -> connection) goes through seamlessly and quickly; this is the fast, happy path. The application is likely to finish execution after the explicit close. "Implicit close" is when the library internally invokes `closeAsync()`; this happens mainly on the recovery/self-healing route (where we have/had race conditions, receiver hangs, and many other reliability issues). This is the route where the connection may be faulted, the dispatcher may reject invocations, retries may be in flight, etc. I was wondering: is there any negative impact on the "implicit close" route if we change the order in which the shutdown signal is emitted? I want to make sure we think through the various control flows; if nothing is concerning, we're good. \\cc @conniey
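To make the ordering question concrete, here is a minimal Reactor sketch (the class and method names are hypothetical, not the library's actual API) of "emit the shutdown signal first, then run the transport-close work":

import reactor.core.publisher.Mono;
import reactor.core.publisher.Sinks;

public final class CloseOrderingSketch {
    // Stand-in for ReactorConnection's shutdownSignalSink.
    private final Sinks.One<String> shutdownSignalSink = Sinks.one();

    Mono<Void> closeAsync(String shutdownSignal) {
        // 1. Emit the shutdown signal first, so consumers of getShutdownSignals()
        //    (e.g. the channel-recreation loop) observe it and stop retrying.
        final Mono<Void> emitSignal = Mono.fromRunnable(() -> {
            final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal);
            if (result.isFailure()) {
                System.out.println("Unable to emit shutdown signal: " + result);
            }
        });
        // 2. Then run the transport-close work (stand-in for scheduling
        //    closeConnectionWork() on the reactor dispatcher, which may be
        //    rejected on the implicit-close route).
        final Mono<Void> closeTransport =
            Mono.fromRunnable(() -> System.out.println("Closing transport."));
        return emitSignal.then(closeTransport);
    }

    public static void main(String[] args) {
        new CloseOrderingSketch().closeAsync("Disposed by client.").block();
    }
}

Under this framing, whether the reordering is safe on the implicit-close route comes down to whether anything subscribed to the shutdown signal still needs the transport before step 2 completes.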
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { final Mono<Void> emitShutDownSignalOperation = Mono.fromRunnable(() -> { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } }); final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(emitShutDownSignalOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Emitted connection shutdown signal. "))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
logger.atVerbose()
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat() .takeUntilOther(shutdownSignalSink.asMono()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(() -> new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(() -> new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(() -> !this.isDisposed()); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
well-known? (i.e., should "wellKnow" in these identifiers and log messages be spelled "wellKnown"/"well-known"?)
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases()));
public KeyVaultKeyStore() { LOGGER.log(FINE, "Constructing KeyVaultKeyStore."); creationDate = new Date(); String keyVaultUri = System.getProperty("azure.keyvault.uri"); String tenantId = System.getProperty("azure.keyvault.tenant-id"); String clientId = System.getProperty("azure.keyvault.client-id"); String clientSecret = System.getProperty("azure.keyvault.client-secret"); String managedIdentity = System.getProperty("azure.keyvault.managed-identity"); long refreshInterval = getRefreshInterval(); refreshCertificatesWhenHaveUnTrustCertificate = Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate") .map(System::getProperty) .map(Boolean::parseBoolean) .orElse(false); jreCertificates = JreCertificates.getInstance(); LOGGER.log(FINE, String.format("Loaded jre certificates: %s.", jreCertificates.getAliases())); wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath); LOGGER.log(FINE, String.format("Loaded well known certificates: %s.", wellKnowCertificates.getAliases())); customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath); LOGGER.log(FINE, String.format("Loaded custom certificates: %s.", customCertificates.getAliases())); keyVaultCertificates = new KeyVaultCertificates( refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity); LOGGER.log(FINE, String.format("Loaded Key Vault certificates: %s.", keyVaultCertificates.getAliases())); classpathCertificates = new ClasspathCertificates(); LOGGER.log(FINE, String.format("Loaded classpath certificates: %s.", classpathCertificates.getAliases())); allCertificates = Arrays.asList( jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates); }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. * </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) 
{ LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
class KeyVaultKeyStore extends KeyStoreSpi { /** * Stores the key-store name. */ public static final String KEY_STORE_TYPE = "AzureKeyVault"; /** * Stores the algorithm name. */ public static final String ALGORITHM_NAME = KEY_STORE_TYPE; /** * Stores the logger. */ private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName()); /** * Stores the Jre key store certificates. */ private final JreCertificates jreCertificates; /** * Store well Know certificates loaded from specific path. */ private final SpecificPathCertificates wellKnowCertificates; /** * Store custom certificates loaded from specific path. */ private final SpecificPathCertificates customCertificates; /** * Store certificates loaded from KeyVault. */ private final KeyVaultCertificates keyVaultCertificates; /** * Store certificates loaded from classpath. */ private final ClasspathCertificates classpathCertificates; /** * Stores all the certificates. */ private final List<AzureCertificates> allCertificates; /** * Stores the creation date. */ private final Date creationDate; private final boolean refreshCertificatesWhenHaveUnTrustCertificate; /** * Store the path where the well know certificate is placed */ final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known")) .orElse("/etc/certs/well-known/"); /** * Store the path where the custom certificate is placed */ final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom")) .orElse("/etc/certs/custom/"); /** * Constructor. * * <p> * The constructor uses System.getProperty for * <code>azure.keyvault.uri</code>, * <code>azure.keyvault.aadAuthenticationUrl</code>, * <code>azure.keyvault.tenantId</code>, * <code>azure.keyvault.clientId</code>, * <code>azure.keyvault.clientSecret</code> and * <code>azure.keyvault.managedIdentity</code> to initialize the * Key Vault client. * </p> */ Long getRefreshInterval() { return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .filter(Objects::nonNull) .map(Long::valueOf) .findFirst() .orElse(0L); } /** * get key vault key store by system property * * @return KeyVault key store * @throws CertificateException if any of the certificates in the * keystore could not be loaded * @throws NoSuchAlgorithmException when algorithm is unavailable. * @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type * @throws IOException when an I/O error occurs. */ public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException { KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME); KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter( System.getProperty("azure.keyvault.uri"), System.getProperty("azure.keyvault.tenant-id"), System.getProperty("azure.keyvault.client-id"), System.getProperty("azure.keyvault.client-secret"), System.getProperty("azure.keyvault.managed-identity")); keyStore.load(parameter); return keyStore; } @Override public Enumeration<String> engineAliases() { return Collections.enumeration(getAllAliases()); } @Override public boolean engineContainsAlias(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineDeleteEntry(String alias) { allCertificates.forEach(a -> a.deleteEntry(alias)); } @Override public boolean engineEntryInstanceOf(String alias, Class<? 
extends KeyStore.Entry> entryClass) { return super.engineEntryInstanceOf(alias, entryClass); } @Override public Certificate engineGetCertificate(String alias) { Certificate certificate = allCertificates.stream() .map(AzureCertificates::getCertificates) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificates -> certificates.get(alias)) .orElse(null); if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) { keyVaultCertificates.refreshCertificates(); certificate = keyVaultCertificates.getCertificates().get(alias); } return certificate; } @Override public String engineGetCertificateAlias(Certificate cert) { String alias = null; if (cert != null) { List<String> aliasList = getAllAliases(); for (String candidateAlias : aliasList) { Certificate certificate = engineGetCertificate(candidateAlias); if (certificate.equals(cert)) { alias = candidateAlias; break; } } } if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) { alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert); } return alias; } @Override public Certificate[] engineGetCertificateChain(String alias) { Certificate[] chain = null; Certificate certificate = engineGetCertificate(alias); if (certificate != null) { chain = new Certificate[1]; chain[0] = certificate; } return chain; } @Override public Date engineGetCreationDate(String alias) { return new Date(creationDate.getTime()); } @Override public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException { return super.engineGetEntry(alias, protParam); } @Override public Key engineGetKey(String alias, char[] password) { return allCertificates.stream() .map(AzureCertificates::getCertificateKeys) .filter(a -> a.containsKey(alias)) .findFirst() .map(certificateKeys -> certificateKeys.get(alias)) .orElse(null); } @Override public boolean engineIsCertificateEntry(String alias) { return getAllAliases().contains(alias); } @Override public boolean engineIsKeyEntry(String alias) { return engineIsCertificateEntry(alias); } @Override public void engineLoad(KeyStore.LoadStoreParameter param) { if (param instanceof KeyVaultLoadStoreParameter) { KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param; keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(), parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity()); } classpathCertificates.loadCertificatesFromClasspath(); } @Override public void engineLoad(InputStream stream, char[] password) { classpathCertificates.loadCertificatesFromClasspath(); } private List<String> getAllAliases() { List<String> allAliases = new ArrayList<>(jreCertificates.getAliases()); Map<String, List<String>> aliasLists = new HashMap<>(); aliasLists.put("well known certificates", wellKnowCertificates.getAliases()); aliasLists.put("custom certificates", customCertificates.getAliases()); aliasLists.put("key vault certificates", keyVaultCertificates.getAliases()); aliasLists.put("class path certificates", classpathCertificates.getAliases()); aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> { if (allAliases.contains(alias)) { LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType)); } else { allAliases.add(alias); } })); return allAliases; } @Override public void engineSetCertificateEntry(String alias, Certificate certificate) { if (getAllAliases().contains(alias)) 
{ LOGGER.log(WARNING, "Cannot overwrite own certificate"); return; } classpathCertificates.setCertificateEntry(alias, certificate); } @Override public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException { super.engineSetEntry(alias, entry, protParam); } @Override public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) { } @Override public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) { } @Override public int engineSize() { return getAllAliases().size(); } @Override public void engineStore(OutputStream stream, char[] password) { } @Override public void engineStore(KeyStore.LoadStoreParameter param) { } }
`put` is deprecated; use `set` instead
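As a hedged illustration of the suggested change, assuming azure-core's `HttpHeaders` (where `put` was deprecated in favor of `set`); the header name and value below are illustrative, not taken from the SDK:

```java
import com.azure.core.http.HttpHeaders;

// Minimal sketch of the review suggestion.
public final class HeaderSetExample {
    public static void main(String[] args) {
        HttpHeaders headers = new HttpHeaders();
        // Before (deprecated): headers.put("x-ms-range", "bytes=0-511");
        // After: set(...) replaces any existing value for the header name.
        headers.set("x-ms-range", "bytes=0-511");
        System.out.println(headers.getValue("x-ms-range"));
    }
}
```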
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( requestHeaders.getValue(CryptographyConstants.RANGE_HEADER)); if (requestHeaders.getValue(CryptographyConstants.RANGE_HEADER) != null) { requestHeaders.put(CryptographyConstants.RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * We will need to know the total size of the data to know when to finalize the decryption. If it was * not set originally with the intent of downloading the whole blob, update it here. */ encryptedRange.setAdjustedDownloadCount(Long.parseLong(responseHeaders.getValue( CryptographyConstants.CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size */ boolean padding = encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE); String encryptedDataString = responseHeaders .getValue(Constants.HeaderConstants.X_MS_META + "-" + CryptographyConstants.ENCRYPTION_DATA_KEY); Flux<ByteBuffer> plainTextData = this.decryptBlob(encryptedDataString, httpResponse.getBody(), encryptedRange, padding); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); }
requestHeaders.put(CryptographyConstants.RANGE_HEADER, encryptedRange.toBlobRange().toString());
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( requestHeaders.getValue(CryptographyConstants.RANGE_HEADER)); if (requestHeaders.getValue(CryptographyConstants.RANGE_HEADER) != null) { requestHeaders.set(CryptographyConstants.RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * We will need to know the total size of the data to know when to finalize the decryption. If it was * not set originally with the intent of downloading the whole blob, update it here. */ encryptedRange.setAdjustedDownloadCount(Long.parseLong(responseHeaders.getValue( CryptographyConstants.CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size */ boolean padding = encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE); String encryptedDataString = responseHeaders .getValue(Constants.HeaderConstants.X_MS_META + "-" + CryptographyConstants.ENCRYPTION_DATA_KEY); Flux<ByteBuffer> plainTextData = this.decryptBlob(encryptedDataString, httpResponse.getBody(), encryptedRange, padding); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
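The javadoc above describes a two-step key-resolution order. A minimal sketch of that order, assuming azure-core's `AsyncKeyEncryptionKey` and `AsyncKeyEncryptionKeyResolver` interfaces; the method and error message are illustrative, not the SDK's actual internals:

```java
import com.azure.core.cryptography.AsyncKeyEncryptionKey;
import com.azure.core.cryptography.AsyncKeyEncryptionKeyResolver;
import reactor.core.publisher.Mono;

// Sketch of the documented resolution order: the resolver, if specified,
// wins; otherwise the configured key is used after its key id is matched.
final class KeyResolutionSketch {
    static Mono<AsyncKeyEncryptionKey> resolveKey(AsyncKeyEncryptionKeyResolver resolver,
        AsyncKeyEncryptionKey key, String keyId) {
        if (resolver != null) {
            // 1. Invoke the key resolver, if specified, to get the key.
            return Mono.from(resolver.buildAsyncKeyEncryptionKey(keyId));
        }
        // 2. No resolver: match the key id on the configured key and use it.
        return key.getKeyId().flatMap(id -> keyId.equals(id)
            ? Mono.just(key)
            : Mono.error(new IllegalArgumentException("Key id does not match: " + keyId)));
    }
}
```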
Why did we remove catch blocks in some places and add them in a couple of other places?
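For context, a minimal sketch of why that pattern matters in the client code below, assuming Reactor: a `RuntimeException` thrown while assembling the `Mono` is caught and converted into an error signal (here via `Mono.error`; the SDK code uses its `monoError` logging helper for the same purpose). All names are illustrative:

```java
import reactor.core.publisher.Mono;

public final class AssemblyErrorSketch {
    // Without a catch, an exception thrown while *assembling* the Mono
    // propagates synchronously and never reaches onError subscribers.
    static Mono<String> failsAtAssembly() {
        throw new IllegalStateException("thrown before anyone subscribes");
    }

    // With the catch, the same failure becomes an error signal, which is
    // what reactive callers expect to handle.
    static Mono<String> guarded() {
        try {
            return failsAtAssembly();
        } catch (RuntimeException ex) {
            return Mono.error(ex);
        }
    }

    public static void main(String[] args) {
        guarded().subscribe(
            value -> System.out.println("value: " + value),
            error -> System.out.println("onError: " + error.getMessage()));
    }
}
```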
public Mono<AppendBlobItem> create() { return create(false); }
}
public Mono<AppendBlobItem> create() { return create(false); }
class AppendBlobAsyncClient extends BlobAsyncClientBase { private static final ClientLogger LOGGER = new ClientLogger(AppendBlobAsyncClient.class); /** * Indicates the maximum number of bytes that can be sent in a call to appendBlock. */ public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; /** * Indicates the maximum number of blocks allowed in an append blob. */ public static final int MAX_BLOCKS = 50000; /** * Package-private constructor for use by {@link SpecializedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. */ AppendBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, versionId); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. * @return a {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public AppendBlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. 
*/ @Override public AppendBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. By default this method will * not overwrite an existing blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * <pre> * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * <pre> * boolean overwrite = false; & * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * * @param overwrite Whether to overwrite, should data exist on the blob. * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> create(boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return createWithResponse(null, null, blobRequestConditions).flatMap(FluxUtil::toMono); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(BlobHttpHeaders headers, Map<String, String> metadata, BlobRequestConditions requestConditions) { return this.createWithResponse(new AppendBlobCreateOptions().setHeaders(headers).setMetadata(metadata) .setRequestConditions(requestConditions)); } /** * Creates a 0-length append blob. 
Call appendBlock to append data to an append blob. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * .setTags& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param options {@link AppendBlobCreateOptions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options) { try { return withContext(context -> createWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options, Context context) { options = (options == null) ? new AppendBlobCreateOptions() : options; BlobRequestConditions requestConditions = options.getRequestConditions(); requestConditions = (requestConditions == null) ? new BlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); return this.azureBlobStorage.getAppendBlobs().createWithResponseAsync(containerName, blobName, 0, null, options.getMetadata(), requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(), options.hasLegalHold(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsCreateHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), null, null, hd.getXMsVersionId()); return new SimpleResponse<>(rb, item); }); } /** * Commits a new block of data to the end of the existing append blob. * <p> * Note that the data passed must be replayable if retries are enabled (the default). In other words, the * {@code Flux} must produce the same data each time it is subscribed to. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock * <pre> * client.appendBlock& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. * @param length The exact length of the data. 
It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * @return {@link Mono} containing the information of the append blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> appendBlock(Flux<ByteBuffer> data, long length) { return appendBlockWithResponse(data, length, null, null).flatMap(FluxUtil::toMono); } /** * Commits a new block of data to the end of the existing append blob. * <p> * Note that the data passed must be replayable if retries are enabled (the default). In other words, the * {@code Flux} must produce the same data each time it is subscribed to. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse * <pre> * byte[] md5 = MessageDigest.getInstance& * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * client.appendBlockWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * @param contentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block during * transport. When this header is specified, the storage service compares the hash of the content that has arrived * with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not match, the * operation will fail. * @param appendBlobRequestConditions {@link AppendBlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions) { try { return withContext(context -> appendBlockWithResponse(data, length, contentMd5, appendBlobRequestConditions, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions, Context context) { appendBlobRequestConditions = appendBlobRequestConditions == null ? new AppendBlobRequestConditions() : appendBlobRequestConditions; context = context == null ? 
Context.NONE : context; return this.azureBlobStorage.getAppendBlobs().appendBlockWithResponseAsync( containerName, blobName, length, data, null, contentMd5, null, appendBlobRequestConditions.getLeaseId(), appendBlobRequestConditions.getMaxSize(), appendBlobRequestConditions.getAppendPosition(), appendBlobRequestConditions.getIfModifiedSince(), appendBlobRequestConditions.getIfUnmodifiedSince(), appendBlobRequestConditions.getIfMatch(), appendBlobRequestConditions.getIfNoneMatch(), appendBlobRequestConditions.getTagsConditions(), null, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsAppendBlockHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount()); return new SimpleResponse<>(rb, item); }); } /** * Commits a new block of data from another blob to the end of this append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl * <pre> * client.appendBlockFromUrl& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange The source {@link BlobRange} to copy. * @return {@link Mono} containing the information of the append blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> appendBlockFromUrl(String sourceUrl, BlobRange sourceRange) { return appendBlockFromUrlWithResponse(sourceUrl, sourceRange, null, null, null).flatMap(FluxUtil::toMono); } /** * Commits a new block of data from another blob to the end of this append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * <pre> * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.appendBlockFromUrlWithResponse& * appendBlobRequestConditions, modifiedRequestConditions& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange {@link BlobRange} * @param sourceContentMD5 An MD5 hash of the block content from the source blob. If specified, the service will * calculate the MD5 of the received data and fail the request if it does not match the provided MD5. 
* @param destRequestConditions {@link AppendBlobRequestConditions} * @param sourceRequestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(String sourceUrl, BlobRange sourceRange, byte[] sourceContentMD5, AppendBlobRequestConditions destRequestConditions, BlobRequestConditions sourceRequestConditions) { return appendBlockFromUrlWithResponse(new AppendBlobAppendBlockFromUrlOptions(sourceUrl) .setSourceRange(sourceRange).setSourceContentMd5(sourceContentMD5) .setDestinationRequestConditions(destRequestConditions) .setSourceRequestConditions(sourceRequestConditions)); } /** * Commits a new block of data from another blob to the end of this append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * <pre> * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.appendBlockFromUrlWithResponse& * .setSourceRange& * .setDestinationRequestConditions& * .setSourceRequestConditions& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * * @param options Parameters for the operation. * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options) { try { return withContext(context -> appendBlockFromUrlWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options, Context context) { BlobRange sourceRange = (options.getSourceRange() == null) ? new BlobRange(0) : options.getSourceRange(); AppendBlobRequestConditions destRequestConditions = (options.getDestinationRequestConditions() == null) ? new AppendBlobRequestConditions() : options.getDestinationRequestConditions(); RequestConditions sourceRequestConditions = (options.getSourceRequestConditions() == null) ? new RequestConditions() : options.getSourceRequestConditions(); try { new URL(options.getSourceUrl()); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.")); } context = context == null ? Context.NONE : context; String sourceAuth = options.getSourceAuthorization() == null ? 
null : options.getSourceAuthorization().toString(); return this.azureBlobStorage.getAppendBlobs().appendBlockFromUrlWithResponseAsync(containerName, blobName, options.getSourceUrl(), 0, sourceRange.toString(), options.getSourceContentMd5(), null, null, null, destRequestConditions.getLeaseId(), destRequestConditions.getMaxSize(), destRequestConditions.getAppendPosition(), destRequestConditions.getIfModifiedSince(), destRequestConditions.getIfUnmodifiedSince(), destRequestConditions.getIfMatch(), destRequestConditions.getIfNoneMatch(), destRequestConditions.getTagsConditions(), sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(), sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), null, sourceAuth, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsAppendBlockFromUrlHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount()); return new SimpleResponse<>(rb, item); }); } /** * Seals an append blob, making it read only. Any subsequent appends will fail. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal --> * <pre> * client.seal& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal --> * * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> seal() { return sealWithResponse(new AppendBlobSealOptions()).flatMap(FluxUtil::toMono); } /** * Seals an append blob, making it read only. Any subsequent appends will fail. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse * <pre> * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions& * .setIfUnmodifiedSince& * * client.sealWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse * * @param options {@link AppendBlobSealOptions} * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options) { try { return withContext(context -> sealWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options, Context context) { options = (options == null) ? new AppendBlobSealOptions() : options; AppendBlobRequestConditions requestConditions = options.getRequestConditions(); requestConditions = (requestConditions == null) ? new AppendBlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; return this.azureBlobStorage.getAppendBlobs().sealWithResponseAsync(containerName, blobName, null, null, requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getAppendPosition(), context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } }
class AppendBlobAsyncClient extends BlobAsyncClientBase { private static final ClientLogger LOGGER = new ClientLogger(AppendBlobAsyncClient.class); /** * Indicates the maximum number of bytes that can be sent in a call to appendBlock. */ public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; /** * Indicates the maximum number of blocks allowed in an append blob. */ public static final int MAX_BLOCKS = 50000; /** * Package-private constructor for use by {@link SpecializedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. */ AppendBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, versionId); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. * @return a {@link AppendBlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public AppendBlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link AppendBlobAsyncClient} with the specified {@code customerProvidedKey}. 
*/ @Override public AppendBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new AppendBlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. By default this method will * not overwrite an existing blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * <pre> * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create --> * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * <pre> * boolean overwrite = false; & * client.create& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.create * * @param overwrite Whether to overwrite, should data exist on the blob. * * @return A {@link Mono} containing the information of the created appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> create(boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return createWithResponse(null, null, blobRequestConditions).flatMap(FluxUtil::toMono); } /** * Creates a 0-length append blob. Call appendBlock to append data to an append blob. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(BlobHttpHeaders headers, Map<String, String> metadata, BlobRequestConditions requestConditions) { return this.createWithResponse(new AppendBlobCreateOptions().setHeaders(headers).setMetadata(metadata) .setRequestConditions(requestConditions)); } /** * Creates a 0-length append blob. 
Call appendBlock to append data to an append blob. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentType& * .setContentLanguage& * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.createWithResponse& * .setTags& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse * * @param options {@link AppendBlobCreateOptions} * @return A {@link Mono} containing {@link Response} whose {@link Response * appended blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options) { try { return withContext(context -> createWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> createWithResponse(AppendBlobCreateOptions options, Context context) { options = (options == null) ? new AppendBlobCreateOptions() : options; BlobRequestConditions requestConditions = options.getRequestConditions(); requestConditions = (requestConditions == null) ? new BlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); return this.azureBlobStorage.getAppendBlobs().createWithResponseAsync(containerName, blobName, 0, null, options.getMetadata(), requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(), options.hasLegalHold(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsCreateHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), null, null, hd.getXMsVersionId()); return new SimpleResponse<>(rb, item); }); } /** * Commits a new block of data to the end of the existing append blob. * <p> * Note that the data passed must be replayable if retries are enabled (the default). In other words, the * {@code Flux} must produce the same data each time it is subscribed to. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock * <pre> * client.appendBlock& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. * @param length The exact length of the data. 
It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * @return {@link Mono} containing the information of the append blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> appendBlock(Flux<ByteBuffer> data, long length) { return appendBlockWithResponse(data, length, null, null).flatMap(FluxUtil::toMono); } /** * Commits a new block of data to the end of the existing append blob. * <p> * Note that the data passed must be replayable if retries are enabled (the default). In other words, the * {@code Flux} must produce the same data each time it is subscribed to. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse * <pre> * byte[] md5 = MessageDigest.getInstance& * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * client.appendBlockWithResponse& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * @param contentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block during * transport. When this header is specified, the storage service compares the hash of the content that has arrived * with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not match, the * operation will fail. * @param appendBlobRequestConditions {@link AppendBlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions) { try { return withContext(context -> appendBlockWithResponse(data, length, contentMd5, appendBlobRequestConditions, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> appendBlockWithResponse(Flux<ByteBuffer> data, long length, byte[] contentMd5, AppendBlobRequestConditions appendBlobRequestConditions, Context context) { appendBlobRequestConditions = appendBlobRequestConditions == null ? new AppendBlobRequestConditions() : appendBlobRequestConditions; context = context == null ? 
Context.NONE : context; return this.azureBlobStorage.getAppendBlobs().appendBlockWithResponseAsync( containerName, blobName, length, data, null, contentMd5, null, appendBlobRequestConditions.getLeaseId(), appendBlobRequestConditions.getMaxSize(), appendBlobRequestConditions.getAppendPosition(), appendBlobRequestConditions.getIfModifiedSince(), appendBlobRequestConditions.getIfUnmodifiedSince(), appendBlobRequestConditions.getIfMatch(), appendBlobRequestConditions.getIfNoneMatch(), appendBlobRequestConditions.getTagsConditions(), null, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsAppendBlockHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount()); return new SimpleResponse<>(rb, item); }); } /** * Commits a new block of data from another blob to the end of this append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl * <pre> * client.appendBlockFromUrl& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange The source {@link BlobRange} to copy. * @return {@link Mono} containing the information of the append blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AppendBlobItem> appendBlockFromUrl(String sourceUrl, BlobRange sourceRange) { return appendBlockFromUrlWithResponse(sourceUrl, sourceRange, null, null, null).flatMap(FluxUtil::toMono); } /** * Commits a new block of data from another blob to the end of this append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * <pre> * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.appendBlockFromUrlWithResponse& * appendBlobRequestConditions, modifiedRequestConditions& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange {@link BlobRange} * @param sourceContentMD5 An MD5 hash of the block content from the source blob. If specified, the service will * calculate the MD5 of the received data and fail the request if it does not match the provided MD5. 
* @param destRequestConditions {@link AppendBlobRequestConditions} * @param sourceRequestConditions {@link BlobRequestConditions} * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(String sourceUrl, BlobRange sourceRange, byte[] sourceContentMD5, AppendBlobRequestConditions destRequestConditions, BlobRequestConditions sourceRequestConditions) { return appendBlockFromUrlWithResponse(new AppendBlobAppendBlockFromUrlOptions(sourceUrl) .setSourceRange(sourceRange).setSourceContentMd5(sourceContentMD5) .setDestinationRequestConditions(destRequestConditions) .setSourceRequestConditions(sourceRequestConditions)); } /** * Commits a new block of data from another blob to the end of this append blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * <pre> * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions& * .setAppendPosition& * .setMaxSize& * * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * * client.appendBlockFromUrlWithResponse& * .setSourceRange& * .setDestinationRequestConditions& * .setSourceRequestConditions& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse * * @param options Parameters for the operation. * @return A {@link Mono} containing {@link Response} whose {@link Response * blob operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options) { try { return withContext(context -> appendBlockFromUrlWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<AppendBlobItem>> appendBlockFromUrlWithResponse(AppendBlobAppendBlockFromUrlOptions options, Context context) { BlobRange sourceRange = (options.getSourceRange() == null) ? new BlobRange(0) : options.getSourceRange(); AppendBlobRequestConditions destRequestConditions = (options.getDestinationRequestConditions() == null) ? new AppendBlobRequestConditions() : options.getDestinationRequestConditions(); RequestConditions sourceRequestConditions = (options.getSourceRequestConditions() == null) ? new RequestConditions() : options.getSourceRequestConditions(); try { new URL(options.getSourceUrl()); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.")); } context = context == null ? Context.NONE : context; String sourceAuth = options.getSourceAuthorization() == null ? 
null : options.getSourceAuthorization().toString(); return this.azureBlobStorage.getAppendBlobs().appendBlockFromUrlWithResponseAsync(containerName, blobName, options.getSourceUrl(), 0, sourceRange.toString(), options.getSourceContentMd5(), null, null, null, destRequestConditions.getLeaseId(), destRequestConditions.getMaxSize(), destRequestConditions.getAppendPosition(), destRequestConditions.getIfModifiedSince(), destRequestConditions.getIfUnmodifiedSince(), destRequestConditions.getIfMatch(), destRequestConditions.getIfNoneMatch(), destRequestConditions.getTagsConditions(), sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(), sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), null, sourceAuth, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> { AppendBlobsAppendBlockFromUrlHeaders hd = rb.getDeserializedHeaders(); AppendBlobItem item = new AppendBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsBlobAppendOffset(), hd.getXMsBlobCommittedBlockCount()); return new SimpleResponse<>(rb, item); }); } /** * Seals an append blob, making it read only. Any subsequent appends will fail. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal --> * <pre> * client.seal& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal --> * * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> seal() { return sealWithResponse(new AppendBlobSealOptions()).flatMap(FluxUtil::toMono); } /** * Seals an append blob, making it read only. Any subsequent appends will fail. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse * <pre> * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions& * .setIfUnmodifiedSince& * * client.sealWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse * * @param options {@link AppendBlobSealOptions} * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options) { try { return withContext(context -> sealWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<Void>> sealWithResponse(AppendBlobSealOptions options, Context context) { options = (options == null) ? new AppendBlobSealOptions() : options; AppendBlobRequestConditions requestConditions = options.getRequestConditions(); requestConditions = (requestConditions == null) ? new AppendBlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; return this.azureBlobStorage.getAppendBlobs().sealWithResponseAsync(containerName, blobName, null, null, requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getAppendPosition(), context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } }
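The Javadoc above repeatedly stresses that the `Flux<ByteBuffer>` passed to `appendBlock` must be replayable when retries are enabled. A minimal sketch of one way to satisfy that, assuming an already-built `AppendBlobAsyncClient` named `client` (client construction is omitted here):

```java
import com.azure.storage.blob.specialized.AppendBlobAsyncClient;
import reactor.core.publisher.Flux;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public final class AppendBlockSample {
    // 'client' stands in for an AppendBlobAsyncClient built elsewhere.
    static void appendReplayable(AppendBlobAsyncClient client) {
        byte[] bytes = "hello append blob".getBytes(StandardCharsets.UTF_8);
        // Flux.defer wraps the array freshly on every subscription, so a
        // retried request re-emits exactly the same bytes from position 0.
        Flux<ByteBuffer> data = Flux.defer(() -> Flux.just(ByteBuffer.wrap(bytes)));
        client.appendBlock(data, bytes.length)
            .subscribe(item -> System.out.printf("Committed at offset %s%n",
                item.getBlobAppendOffset()));
    }
}
```

Deferring the wrap is what makes the publisher replayable: a plain `Flux.just(ByteBuffer.wrap(bytes))` would emit a buffer whose position had already advanced after the first read.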
A bit strange to only return the first credential. The user can still get all of them by calling `userKubeConfigs(format)`?
public byte[] userKubeConfigContent(Format format) { if (format == null) { return userKubeConfigContent(); } for (CredentialResult config : userKubeConfigs(format)) { return config.value(); } return new byte[0]; }
}
public byte[] userKubeConfigContent(Format format) { if (format == null) { return userKubeConfigContent(); } for (CredentialResult config : userKubeConfigs(format)) { return config.value(); } return new byte[0]; }
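As the review comment notes, `userKubeConfigContent(Format)` surfaces only the first credential's bytes; a caller who wants every credential can go through `userKubeConfigs(format)` instead. A minimal sketch, assuming a `KubernetesCluster` obtained elsewhere and the usual containerservice model packages (`Format.EXEC` is just an example value):

```java
import com.azure.resourcemanager.containerservice.models.CredentialResult;
import com.azure.resourcemanager.containerservice.models.Format;
import com.azure.resourcemanager.containerservice.models.KubernetesCluster;

import java.util.List;

public final class KubeConfigSample {
    // 'cluster' stands in for a KubernetesCluster retrieved elsewhere.
    static void printAllUserConfigs(KubernetesCluster cluster) {
        // Unlike userKubeConfigContent(format), this returns every credential.
        List<CredentialResult> configs = cluster.userKubeConfigs(Format.EXEC);
        for (CredentialResult config : configs) {
            System.out.printf("credential %s: %d bytes%n",
                config.name(), config.value().length);
        }
    }
}
```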
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList( this.formatUserKubeConfigsMap.computeIfAbsent( format, key -> KubernetesClusterImpl.this .manager() .kubernetesClusters() .listUserKubeConfigContent( KubernetesClusterImpl.this.resourceGroupName(), KubernetesClusterImpl.this.name(), format )) ); } @Override public byte[] adminKubeConfigContent() { for (CredentialResult config : adminKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent() { for (CredentialResult config : userKubeConfigs()) { return config.value(); } return new byte[0]; } @Override @Override public String servicePrincipalClientId() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().clientId(); } else { return null; } } @Override public String servicePrincipalSecret() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().secret(); } else { return null; } } @Override public String linuxRootUsername() { if (this.innerModel().linuxProfile() != null) { return this.innerModel().linuxProfile().adminUsername(); } else { return null; } } @Override public String sshKey() { if (this.innerModel().linuxProfile() == null || this.innerModel().linuxProfile().ssh() == null || this.innerModel().linuxProfile().ssh().publicKeys() == null || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) { return null; } else { return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData(); } } @Override public Map<String, KubernetesClusterAgentPool> agentPools() { Map<String, 
KubernetesClusterAgentPool> agentPoolMap = new HashMap<>(); if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) { for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) { agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this)); } } return Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux.merge(adminConfig, userConfig).last().map(bytes -> managedClusterInner)); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public KubernetesClusterImpl withDefaultVersion() { 
this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl withServicePrincipalClientId(String clientId) { this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override public KubernetesClusterImpl withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), 
agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ? null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? 
null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
class KubernetesClusterImpl extends GroupableResourceImpl< KubernetesCluster, ManagedClusterInner, KubernetesClusterImpl, ContainerServiceManager> implements KubernetesCluster, KubernetesCluster.Definition, KubernetesCluster.Update { private final ClientLogger logger = new ClientLogger(getClass()); private List<CredentialResult> adminKubeConfigs; private List<CredentialResult> userKubeConfigs; private final Map<Format, List<CredentialResult>> formatUserKubeConfigsMap = new ConcurrentHashMap<>(); protected KubernetesClusterImpl(String name, ManagedClusterInner innerObject, ContainerServiceManager manager) { super(name, innerObject, manager); if (this.innerModel().agentPoolProfiles() == null) { this.innerModel().withAgentPoolProfiles(new ArrayList<>()); } this.adminKubeConfigs = null; this.userKubeConfigs = null; } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public String dnsPrefix() { return this.innerModel().dnsPrefix(); } @Override public String fqdn() { return this.innerModel().fqdn(); } @Override public String version() { return this.innerModel().kubernetesVersion(); } @Override public List<CredentialResult> adminKubeConfigs() { if (this.adminKubeConfigs == null || this.adminKubeConfigs.size() == 0) { this.adminKubeConfigs = this.manager().kubernetesClusters().listAdminKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.adminKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs() { if (this.userKubeConfigs == null || this.userKubeConfigs.size() == 0) { this.userKubeConfigs = this.manager().kubernetesClusters().listUserKubeConfigContent(this.resourceGroupName(), this.name()); } return Collections.unmodifiableList(this.userKubeConfigs); } @Override public List<CredentialResult> userKubeConfigs(Format format) { if (format == null) { return userKubeConfigs(); } return Collections.unmodifiableList( this.formatUserKubeConfigsMap.computeIfAbsent( format, key -> KubernetesClusterImpl.this .manager() .kubernetesClusters() .listUserKubeConfigContent( KubernetesClusterImpl.this.resourceGroupName(), KubernetesClusterImpl.this.name(), format )) ); } @Override public byte[] adminKubeConfigContent() { for (CredentialResult config : adminKubeConfigs()) { return config.value(); } return new byte[0]; } @Override public byte[] userKubeConfigContent() { for (CredentialResult config : userKubeConfigs()) { return config.value(); } return new byte[0]; } @Override @Override public String servicePrincipalClientId() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().clientId(); } else { return null; } } @Override public String servicePrincipalSecret() { if (this.innerModel().servicePrincipalProfile() != null) { return this.innerModel().servicePrincipalProfile().secret(); } else { return null; } } @Override public String linuxRootUsername() { if (this.innerModel().linuxProfile() != null) { return this.innerModel().linuxProfile().adminUsername(); } else { return null; } } @Override public String sshKey() { if (this.innerModel().linuxProfile() == null || this.innerModel().linuxProfile().ssh() == null || this.innerModel().linuxProfile().ssh().publicKeys() == null || this.innerModel().linuxProfile().ssh().publicKeys().size() == 0) { return null; } else { return this.innerModel().linuxProfile().ssh().publicKeys().get(0).keyData(); } } @Override public Map<String, KubernetesClusterAgentPool> agentPools() { Map<String, 
KubernetesClusterAgentPool> agentPoolMap = new HashMap<>(); if (this.innerModel().agentPoolProfiles() != null && this.innerModel().agentPoolProfiles().size() > 0) { for (ManagedClusterAgentPoolProfile agentPoolProfile : this.innerModel().agentPoolProfiles()) { agentPoolMap.put(agentPoolProfile.name(), new KubernetesClusterAgentPoolImpl(agentPoolProfile, this)); } } return Collections.unmodifiableMap(agentPoolMap); } @Override public ContainerServiceNetworkProfile networkProfile() { return this.innerModel().networkProfile(); } @Override public Map<String, ManagedClusterAddonProfile> addonProfiles() { return Collections.unmodifiableMap(this.innerModel().addonProfiles()); } @Override public String nodeResourceGroup() { return this.innerModel().nodeResourceGroup(); } @Override public boolean enableRBAC() { return this.innerModel().enableRbac(); } @Override public PowerState powerState() { return this.innerModel().powerState(); } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { String objectId = null; if (this.innerModel().identityProfile() != null) { UserAssignedIdentity identity = this.innerModel().identityProfile().get("kubeletidentity"); if (identity != null) { objectId = identity.objectId(); } } return objectId; } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return manager().kubernetesClusters().startAsync(this.resourceGroupName(), this.name()); } @Override public void stop() { this.stopAsync().block(); } @Override public Mono<Void> stopAsync() { return manager().kubernetesClusters().stopAsync(this.resourceGroupName(), this.name()); } private Mono<List<CredentialResult>> listAdminConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listAdminKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.adminKubeConfigs = kubeConfigs; return self.adminKubeConfigs; }); } private Mono<List<CredentialResult>> listUserConfig(final KubernetesClusterImpl self) { return this .manager() .kubernetesClusters() .listUserKubeConfigContentAsync(self.resourceGroupName(), self.name()) .map( kubeConfigs -> { self.userKubeConfigs = kubeConfigs; return self.userKubeConfigs; }); } @Override protected Mono<ManagedClusterInner> getInnerAsync() { final KubernetesClusterImpl self = this; final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .flatMap( managedClusterInner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { formatUserKubeConfigsMap.clear(); return managedClusterInner; })); } @Override public Mono<KubernetesCluster> createResourceAsync() { final KubernetesClusterImpl self = this; if (!this.isInCreateMode()) { this.innerModel().withServicePrincipalProfile(null); } final Mono<List<CredentialResult>> adminConfig = listAdminConfig(self); final Mono<List<CredentialResult>> userConfig = listUserConfig(self); return this .manager() .serviceClient() .getManagedClusters() .createOrUpdateAsync(self.resourceGroupName(), self.name(), self.innerModel()) .flatMap( inner -> Flux .merge(adminConfig, userConfig) .last() .map( bytes -> { self.setInner(inner); return self; })); } @Override public KubernetesClusterImpl withVersion(String kubernetesVersion) { this.innerModel().withKubernetesVersion(kubernetesVersion); return this; } @Override public 
KubernetesClusterImpl withDefaultVersion() { this.innerModel().withKubernetesVersion(""); return this; } @Override public KubernetesClusterImpl withRootUsername(String rootUserName) { if (this.innerModel().linuxProfile() == null) { this.innerModel().withLinuxProfile(new ContainerServiceLinuxProfile()); } this.innerModel().linuxProfile().withAdminUsername(rootUserName); return this; } @Override public KubernetesClusterImpl withSshKey(String sshKeyData) { this .innerModel() .linuxProfile() .withSsh( new ContainerServiceSshConfiguration().withPublicKeys(new ArrayList<ContainerServiceSshPublicKey>())); this.innerModel().linuxProfile().ssh().publicKeys().add( new ContainerServiceSshPublicKey().withKeyData(sshKeyData)); return this; } @Override public KubernetesClusterImpl withServicePrincipalClientId(String clientId) { this.innerModel().withServicePrincipalProfile( new ManagedClusterServicePrincipalProfile().withClientId(clientId)); return this; } @Override public KubernetesClusterImpl withSystemAssignedManagedServiceIdentity() { this.innerModel().withIdentity(new ManagedClusterIdentity().withType(ResourceIdentityType.SYSTEM_ASSIGNED)); return this; } @Override public KubernetesClusterImpl withServicePrincipalSecret(String secret) { this.innerModel().servicePrincipalProfile().withSecret(secret); return this; } @Override public KubernetesClusterImpl withDnsPrefix(String dnsPrefix) { this.innerModel().withDnsPrefix(dnsPrefix); return this; } @Override public KubernetesClusterAgentPoolImpl defineAgentPool(String name) { ManagedClusterAgentPoolProfile innerPoolProfile = new ManagedClusterAgentPoolProfile(); innerPoolProfile.withName(name); return new KubernetesClusterAgentPoolImpl(innerPoolProfile, this); } @Override public KubernetesClusterAgentPoolImpl updateAgentPool(String name) { for (ManagedClusterAgentPoolProfile agentPoolProfile : innerModel().agentPoolProfiles()) { if (agentPoolProfile.name().equals(name)) { return new KubernetesClusterAgentPoolImpl(agentPoolProfile, this); } } throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "Cannot get agent pool named %s", name))); } @Override public Update withoutAgentPool(String name) { if (innerModel().agentPoolProfiles() != null) { innerModel().withAgentPoolProfiles( innerModel().agentPoolProfiles().stream() .filter(p -> !name.equals(p.name())) .collect(Collectors.toList())); this.addDependency(context -> manager().serviceClient().getAgentPools().deleteAsync(resourceGroupName(), name(), name) .then(context.voidMono())); } return this; } @Override public KubernetesCluster.DefinitionStages.NetworkProfileDefinitionStages.Blank< KubernetesCluster.DefinitionStages.WithCreate> defineNetworkProfile() { return new KubernetesClusterNetworkProfileImpl(this); } @Override public KubernetesClusterImpl withAddOnProfiles(Map<String, ManagedClusterAddonProfile> addOnProfileMap) { this.innerModel().withAddonProfiles(addOnProfileMap); return this; } @Override public KubernetesClusterImpl withNetworkProfile(ContainerServiceNetworkProfile networkProfile) { this.innerModel().withNetworkProfile(networkProfile); return this; } @Override public KubernetesClusterImpl withRBACEnabled() { this.innerModel().withEnableRbac(true); return this; } @Override public KubernetesClusterImpl withRBACDisabled() { this.innerModel().withEnableRbac(false); return this; } public KubernetesClusterImpl addNewAgentPool(KubernetesClusterAgentPoolImpl agentPool) { if (!isInCreateMode()) { this.addDependency(context -> 
manager().serviceClient().getAgentPools().createOrUpdateAsync( resourceGroupName(), name(), agentPool.name(), agentPool.getAgentPoolInner()) .then(context.voidMono())); } innerModel().agentPoolProfiles().add(agentPool.innerModel()); return this; } @Override public KubernetesClusterImpl withAutoScalerProfile(ManagedClusterPropertiesAutoScalerProfile autoScalerProfile) { this.innerModel().withAutoScalerProfile(autoScalerProfile); return this; } @Override public KubernetesClusterImpl enablePrivateCluster() { if (innerModel().apiServerAccessProfile() == null) { innerModel().withApiServerAccessProfile(new ManagedClusterApiServerAccessProfile()); } innerModel().apiServerAccessProfile().withEnablePrivateCluster(true); return this; } @Override public PagedIterable<PrivateLinkResource> listPrivateLinkResources() { return new PagedIterable<>(listPrivateLinkResourcesAsync()); } @Override public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() { Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateLinkResourceImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } @Override public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() { return new PagedIterable<>(listPrivateEndpointConnectionsAsync()); } @Override public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() { Mono<Response<List<PrivateEndpointConnection>>> retList = this.manager().serviceClient() .getPrivateEndpointConnections() .listWithResponseAsync(this.resourceGroupName(), this.name()) .map(response -> new SimpleResponse<>(response, response.getValue().value().stream() .map(PrivateEndpointConnectionImpl::new) .collect(Collectors.toList()))); return PagedConverter.convertListToPagedFlux(retList); } private static final class PrivateLinkResourceImpl implements PrivateLinkResource { private final PrivateLinkResourceInner innerModel; private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) { this.innerModel = innerModel; } @Override public String groupId() { return innerModel.groupId(); } @Override public List<String> requiredMemberNames() { return Collections.unmodifiableList(innerModel.requiredMembers()); } @Override public List<String> requiredDnsZoneNames() { return Collections.emptyList(); } } private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection { private final PrivateEndpointConnectionInner innerModel; private final PrivateEndpoint privateEndpoint; private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState; private final PrivateEndpointConnectionProvisioningState provisioningState; private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) { this.innerModel = innerModel; this.privateEndpoint = innerModel.privateEndpoint() == null ? null : new PrivateEndpoint(innerModel.privateEndpoint().id()); this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null ? null : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState( innerModel.privateLinkServiceConnectionState().status() == null ? 
null : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus .fromString(innerModel.privateLinkServiceConnectionState().status().toString()), innerModel.privateLinkServiceConnectionState().description(), ""); this.provisioningState = innerModel.provisioningState() == null ? null : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString()); } @Override public String id() { return innerModel.id(); } @Override public String name() { return innerModel.name(); } @Override public String type() { return innerModel.type(); } @Override public PrivateEndpoint privateEndpoint() { return privateEndpoint; } @Override public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState privateLinkServiceConnectionState() { return privateLinkServiceConnectionState; } @Override public PrivateEndpointConnectionProvisioningState provisioningState() { return provisioningState; } } }
We should not use the ClientLogger here.
private static void validateLength(String namespace) { if (namespace.length() < 6 || namespace.length() > 50) { LOGGER.warning(LENGTH_ERROR); } }
LOGGER.warning(LENGTH_ERROR);
private static void validateLength(String namespace) { if (namespace.length() < 6 || namespace.length() > 50) { throw new IllegalArgumentException(LENGTH_ERROR); } }
class PropertiesValidator { private static final ClientLogger LOGGER = new ClientLogger(PropertiesValidator.class); public static final String LENGTH_ERROR = "The namespace must be between 6 and 50 characters long."; public static final String ILLEGAL_SYMBOL_ERROR = "The namespace can contain only letters, numbers, and hyphens."; public static final String START_SYMBOL_ERROR = "The namespace must start with a letter."; public static final String END_SYMBOL_ERROR = "The namespace must end with a letter or number."; public static void validateNamespace(String namespace) { validateLength(namespace); validateIllegalSymbol(namespace); validateStartingSymbol(namespace); validateEndingSymbol(namespace); } private static void validateIllegalSymbol(String namespace) { if (!namespace.matches("[a-z0-9A-Z-]+")) { LOGGER.warning(ILLEGAL_SYMBOL_ERROR); } } private static void validateStartingSymbol(String namespace) { if (!Character.isLetter(namespace.charAt(0))) { LOGGER.warning(START_SYMBOL_ERROR); } } private static void validateEndingSymbol(String namespace) { if (!Character.isLetterOrDigit(namespace.charAt(namespace.length() - 1))) { LOGGER.warning(END_SYMBOL_ERROR); } } }
class PropertiesValidator { public static final String LENGTH_ERROR = "The namespace must be between 6 and 50 characters long."; public static final String ILLEGAL_SYMBOL_ERROR = "The namespace can contain only letters, numbers, and hyphens."; public static final String START_SYMBOL_ERROR = "The namespace must start with a letter."; public static final String END_SYMBOL_ERROR = "The namespace must end with a letter or number."; public static void validateNamespace(String namespace) { validateLength(namespace); validateIllegalSymbol(namespace); validateStartingSymbol(namespace); validateEndingSymbol(namespace); } private static void validateIllegalSymbol(String namespace) { if (!namespace.matches("[a-z0-9A-Z-]+")) { throw new IllegalArgumentException(ILLEGAL_SYMBOL_ERROR); } } private static void validateStartingSymbol(String namespace) { if (!Character.isLetter(namespace.charAt(0))) { throw new IllegalArgumentException(START_SYMBOL_ERROR); } } private static void validateEndingSymbol(String namespace) { if (!Character.isLetterOrDigit(namespace.charAt(namespace.length() - 1))) { throw new IllegalArgumentException(END_SYMBOL_ERROR); } } }
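The effect of the change above is that an invalid namespace now fails fast instead of logging a warning and letting execution continue. A minimal caller-side sketch of the new behavior, assuming the `PropertiesValidator` shown above:

```java
public final class ValidatorSample {
    public static void main(String[] args) {
        try {
            // Shorter than 6 characters: previously this only logged a
            // warning; now the call is rejected up front.
            PropertiesValidator.validateNamespace("abc");
        } catch (IllegalArgumentException e) {
            System.out.println("Rejected: " + e.getMessage());
        }
    }
}
```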
Should UTF-8 be enforced for this new `String`? #Pending
private static String byteArrayToHex(byte[] bytes) { char[] hexChars = new char[bytes.length * 2]; for (int j = 0; j < bytes.length; j++) { int v = bytes[j] & 0xFF; hexChars[j * 2] = HEX_ARRAY[v >>> 4]; hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F]; } return new String(hexChars); }
return new String(hexChars);
private static String byteArrayToHex(byte[] bytes) { char[] hexChars = new char[bytes.length * 2]; for (int j = 0; j < bytes.length; j++) { int v = bytes[j] & 0xFF; hexChars[j * 2] = HEX_ARRAY[v >>> 4]; hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F]; } return new String(hexChars); }
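On the UTF-8 question: `new String(char[])` involves no charset at all, because a `char[]` already holds UTF-16 code units; a `Charset` only participates when decoding a `byte[]`. Since `byteArrayToHex` emits only ASCII hex digits, the result is identical under any ASCII-compatible charset anyway. A small sketch contrasting the two constructors:

```java
import java.nio.charset.StandardCharsets;

public final class HexCharsetSample {
    public static void main(String[] args) {
        char[] hexChars = {'c', 'a', 'f', 'e'};
        // char[] -> String copies UTF-16 code units directly; no charset runs.
        String fromChars = new String(hexChars);

        // A charset only matters when decoding bytes; for ASCII-only hex
        // digits, UTF-8 and US-ASCII decode to the same string.
        byte[] hexBytes = {'c', 'a', 'f', 'e'};
        String fromBytes = new String(hexBytes, StandardCharsets.UTF_8);

        System.out.println(fromChars.equals(fromBytes)); // prints true
    }
}
```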
class UtilsImpl { private static final String CLIENT_NAME; private static final String CLIENT_VERSION; private static final int HTTP_STATUS_CODE_NOT_FOUND; private static final int HTTP_STATUS_CODE_ACCEPTED; private static final String CONTINUATION_LINK_HEADER_NAME; private static final Pattern CONTINUATION_LINK_PATTERN; public static final String OCI_MANIFEST_MEDIA_TYPE; public static final String DOCKER_DIGEST_HEADER_NAME; public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE; private static final ClientLogger LOGGER; static { LOGGER = new ClientLogger(UtilsImpl.class); Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties"); CLIENT_NAME = properties.getOrDefault("name", "UnknownName"); CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion"); HTTP_STATUS_CODE_NOT_FOUND = 404; HTTP_STATUS_CODE_ACCEPTED = 202; OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json"; DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest"; CONTINUATION_LINK_HEADER_NAME = "Link"; CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*"); CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; } private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. * @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. 
*/ public static HttpPipeline buildHttpPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, ClientLogger logger) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions); if (credential == null) { logger.verbose("Credentials are null, enabling anonymous access"); } ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies); credentialPolicies.add(loggingPolicy); if (audience == null) { audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD; } ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService( credential, audience, endpoint, serviceVersion, new HttpPipelineBuilder() .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(), JacksonAdapter.createDefaultSerializerAdapter()); ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService); policies.add(credentialsPolicy); policies.add(loggingPolicy); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) { ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>(); for (HttpPipelinePolicy policy:policies) { clonedPolicy.add(policy); } return clonedPolicy; } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. */ public static String computeDigest(ByteBuffer buffer) { ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer(); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(readOnlyBuffer); byte[] digest = md.digest(); return "sha256:" + byteArrayToHex(digest); } catch (NoSuchAlgorithmException e) { LOGGER.error("SHA-256 conversion failed with" + e.getMessage()); throw new RuntimeException(e); } } private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray(); /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. 
*/ public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return Mono.just(new ResponseBase<String, Void>( responseT.getRequest(), statusCode, responseT.getHeaders(), null, null)); } /** * This method converts the API response codes into well known exceptions. * @param exception The exception returned by the rest client. * @return The exception returned by the public methods. */ public static Throwable mapException(Throwable exception) { AcrErrorsException acrException = null; if (exception instanceof AcrErrorsException) { acrException = ((AcrErrorsException) exception); } else if (exception instanceof RuntimeException) { RuntimeException runtimeException = (RuntimeException) exception; Throwable throwable = runtimeException.getCause(); if (throwable instanceof AcrErrorsException) { acrException = (AcrErrorsException) throwable; } } if (acrException == null) { return exception; } final HttpResponse errorHttpResponse = acrException.getResponse(); final int statusCode = errorHttpResponse.getStatusCode(); final String errorDetail = acrException.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, acrException.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception); default: return new HttpResponseException(errorDetail, acrException.getResponse(), exception); } } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. * @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. 
*/ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = null; HttpHeaders headers = listResponse.getHeaders(); if (headers != null) { String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) { Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader); if (matcher.matches()) { if (matcher.groupCount() == 1) { continuationLink = matcher.group(1); } } } } List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } }
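`computeDigest` above hashes through `asReadOnlyBuffer()`, whose position and limit are independent of the source buffer, so consuming the duplicate leaves the caller's buffer untouched. A minimal sketch of that behavior, assuming it runs in the same package as the package-private `UtilsImpl`:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public final class DigestSample {
    public static void main(String[] args) {
        ByteBuffer manifest = ByteBuffer.wrap(
            "{\"schemaVersion\": 2}".getBytes(StandardCharsets.UTF_8));
        String digest = UtilsImpl.computeDigest(manifest);
        System.out.println(digest);               // sha256:<64 hex characters>
        System.out.println(manifest.position());  // still 0; caller unaffected
    }
}
```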
class UtilsImpl { private static final String CLIENT_NAME; private static final String CLIENT_VERSION; private static final int HTTP_STATUS_CODE_NOT_FOUND; private static final int HTTP_STATUS_CODE_ACCEPTED; private static final String CONTINUATION_LINK_HEADER_NAME; private static final Pattern CONTINUATION_LINK_PATTERN; public static final String OCI_MANIFEST_MEDIA_TYPE; public static final String DOCKER_DIGEST_HEADER_NAME; public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE; private static final ClientLogger LOGGER; static { LOGGER = new ClientLogger(UtilsImpl.class); Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties"); CLIENT_NAME = properties.getOrDefault("name", "UnknownName"); CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion"); HTTP_STATUS_CODE_NOT_FOUND = 404; HTTP_STATUS_CODE_ACCEPTED = 202; OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json"; DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest"; CONTINUATION_LINK_HEADER_NAME = "Link"; CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*"); CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; } private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. * @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. 
*/ public static HttpPipeline buildHttpPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, ClientLogger logger) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions); if (credential == null) { logger.verbose("Credentials are null, enabling anonymous access"); } ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies); credentialPolicies.add(loggingPolicy); if (audience == null) { audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD; } ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService( credential, audience, endpoint, serviceVersion, new HttpPipelineBuilder() .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(), JacksonAdapter.createDefaultSerializerAdapter()); ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService); policies.add(credentialsPolicy); policies.add(loggingPolicy); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) { ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>(); for (HttpPipelinePolicy policy:policies) { clonedPolicy.add(policy); } return clonedPolicy; } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. */ public static String computeDigest(ByteBuffer buffer) { ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer(); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(readOnlyBuffer); byte[] digest = md.digest(); return "sha256:" + byteArrayToHex(digest); } catch (NoSuchAlgorithmException e) { LOGGER.error("SHA-256 conversion failed with" + e); throw new RuntimeException(e); } } private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray(); /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. 
*/ public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return Mono.just(new SimpleResponse<Void>( responseT.getRequest(), statusCode, responseT.getHeaders(), null)); } /** * This method converts the API response codes into well known exceptions. * @param exception The exception returned by the rest client. * @return The exception returned by the public methods. */ public static Throwable mapException(Throwable exception) { AcrErrorsException acrException = null; if (exception instanceof AcrErrorsException) { acrException = ((AcrErrorsException) exception); } else if (exception instanceof RuntimeException) { RuntimeException runtimeException = (RuntimeException) exception; Throwable throwable = runtimeException.getCause(); if (throwable instanceof AcrErrorsException) { acrException = (AcrErrorsException) throwable; } } if (acrException == null) { return exception; } final HttpResponse errorHttpResponse = acrException.getResponse(); final int statusCode = errorHttpResponse.getStatusCode(); final String errorDetail = acrException.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, acrException.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception); default: return new HttpResponseException(errorDetail, acrException.getResponse(), exception); } } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. * @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. 
*/ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = null; HttpHeaders headers = listResponse.getHeaders(); if (headers != null) { String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) { Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader); if (matcher.matches()) { if (matcher.groupCount() == 1) { continuationLink = matcher.group(1); } } } } List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } }
```suggestion
LOGGER.error("SHA-256 conversion failed.", e);
```
I'd let the logger handle how the exception should be logged #Resolved
public static String computeDigest(ByteBuffer buffer) { ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer(); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(readOnlyBuffer); byte[] digest = md.digest(); return "sha256:" + byteArrayToHex(digest); } catch (NoSuchAlgorithmException e) { LOGGER.error("SHA-256 conversion failed with" + e.getMessage()); throw new RuntimeException(e); } }
LOGGER.error("SHA-256 conversion failed with" + e.getMessage());
public static String computeDigest(ByteBuffer buffer) { ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer(); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(readOnlyBuffer); byte[] digest = md.digest(); return "sha256:" + byteArrayToHex(digest); } catch (NoSuchAlgorithmException e) { LOGGER.error("SHA-256 conversion failed with" + e); throw new RuntimeException(e); } }
class UtilsImpl { private static final String CLIENT_NAME; private static final String CLIENT_VERSION; private static final int HTTP_STATUS_CODE_NOT_FOUND; private static final int HTTP_STATUS_CODE_ACCEPTED; private static final String CONTINUATION_LINK_HEADER_NAME; private static final Pattern CONTINUATION_LINK_PATTERN; public static final String OCI_MANIFEST_MEDIA_TYPE; public static final String DOCKER_DIGEST_HEADER_NAME; public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE; private static final ClientLogger LOGGER; static { LOGGER = new ClientLogger(UtilsImpl.class); Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties"); CLIENT_NAME = properties.getOrDefault("name", "UnknownName"); CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion"); HTTP_STATUS_CODE_NOT_FOUND = 404; HTTP_STATUS_CODE_ACCEPTED = 202; OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json"; DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest"; CONTINUATION_LINK_HEADER_NAME = "Link"; CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*"); CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; } private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. * @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. 
*/ public static HttpPipeline buildHttpPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, ClientLogger logger) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions); if (credential == null) { logger.verbose("Credentials are null, enabling anonymous access"); } ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies); credentialPolicies.add(loggingPolicy); if (audience == null) { audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD; } ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService( credential, audience, endpoint, serviceVersion, new HttpPipelineBuilder() .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(), JacksonAdapter.createDefaultSerializerAdapter()); ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService); policies.add(credentialsPolicy); policies.add(loggingPolicy); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) { ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>(); for (HttpPipelinePolicy policy:policies) { clonedPolicy.add(policy); } return clonedPolicy; } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. */ private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray(); private static String byteArrayToHex(byte[] bytes) { char[] hexChars = new char[bytes.length * 2]; for (int j = 0; j < bytes.length; j++) { int v = bytes[j] & 0xFF; hexChars[j * 2] = HEX_ARRAY[v >>> 4]; hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F]; } return new String(hexChars); } /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. 
*/ public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return Mono.just(new ResponseBase<String, Void>( responseT.getRequest(), statusCode, responseT.getHeaders(), null, null)); } /** * This method converts the API response codes into well known exceptions. * @param exception The exception returned by the rest client. * @return The exception returned by the public methods. */ public static Throwable mapException(Throwable exception) { AcrErrorsException acrException = null; if (exception instanceof AcrErrorsException) { acrException = ((AcrErrorsException) exception); } else if (exception instanceof RuntimeException) { RuntimeException runtimeException = (RuntimeException) exception; Throwable throwable = runtimeException.getCause(); if (throwable instanceof AcrErrorsException) { acrException = (AcrErrorsException) throwable; } } if (acrException == null) { return exception; } final HttpResponse errorHttpResponse = acrException.getResponse(); final int statusCode = errorHttpResponse.getStatusCode(); final String errorDetail = acrException.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, acrException.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception); default: return new HttpResponseException(errorDetail, acrException.getResponse(), exception); } } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. * @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. 
*/ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = null; HttpHeaders headers = listResponse.getHeaders(); if (headers != null) { String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) { Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader); if (matcher.matches()) { if (matcher.groupCount() == 1) { continuationLink = matcher.group(1); } } } } List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } }
class UtilsImpl { private static final String CLIENT_NAME; private static final String CLIENT_VERSION; private static final int HTTP_STATUS_CODE_NOT_FOUND; private static final int HTTP_STATUS_CODE_ACCEPTED; private static final String CONTINUATION_LINK_HEADER_NAME; private static final Pattern CONTINUATION_LINK_PATTERN; public static final String OCI_MANIFEST_MEDIA_TYPE; public static final String DOCKER_DIGEST_HEADER_NAME; public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE; private static final ClientLogger LOGGER; static { LOGGER = new ClientLogger(UtilsImpl.class); Map<String, String> properties = CoreUtils.getProperties("azure-containers-containerregistry.properties"); CLIENT_NAME = properties.getOrDefault("name", "UnknownName"); CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion"); HTTP_STATUS_CODE_NOT_FOUND = 404; HTTP_STATUS_CODE_ACCEPTED = 202; OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json"; DOCKER_DIGEST_HEADER_NAME = "Docker-Content-Digest"; CONTINUATION_LINK_HEADER_NAME = "Link"; CONTINUATION_LINK_PATTERN = Pattern.compile("<(.+)>;.*"); CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; } private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. * @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. 
*/ public static HttpPipeline buildHttpPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, ClientLogger logger) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions); if (credential == null) { logger.verbose("Credentials are null, enabling anonymous access"); } ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies); credentialPolicies.add(loggingPolicy); if (audience == null) { audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD; } ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService( credential, audience, endpoint, serviceVersion, new HttpPipelineBuilder() .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(), JacksonAdapter.createDefaultSerializerAdapter()); ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService); policies.add(credentialsPolicy); policies.add(loggingPolicy); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) { ArrayList<HttpPipelinePolicy> clonedPolicy = new ArrayList<>(); for (HttpPipelinePolicy policy:policies) { clonedPolicy.add(policy); } return clonedPolicy; } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. */ private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray(); private static String byteArrayToHex(byte[] bytes) { char[] hexChars = new char[bytes.length * 2]; for (int j = 0; j < bytes.length; j++) { int v = bytes[j] & 0xFF; hexChars[j * 2] = HEX_ARRAY[v >>> 4]; hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F]; } return new String(hexChars); } /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. 
*/ public static <T> Mono<Response<Void>> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } static <T> Mono<Response<Void>> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return Mono.just(new SimpleResponse<Void>( responseT.getRequest(), statusCode, responseT.getHeaders(), null)); } /** * This method converts the API response codes into well known exceptions. * @param exception The exception returned by the rest client. * @return The exception returned by the public methods. */ public static Throwable mapException(Throwable exception) { AcrErrorsException acrException = null; if (exception instanceof AcrErrorsException) { acrException = ((AcrErrorsException) exception); } else if (exception instanceof RuntimeException) { RuntimeException runtimeException = (RuntimeException) exception; Throwable throwable = runtimeException.getCause(); if (throwable instanceof AcrErrorsException) { acrException = (AcrErrorsException) throwable; } } if (acrException == null) { return exception; } final HttpResponse errorHttpResponse = acrException.getResponse(); final int statusCode = errorHttpResponse.getStatusCode(); final String errorDetail = acrException.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, acrException.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, acrException.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, acrException.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, acrException.getResponse(), exception); default: return new HttpResponseException(errorDetail, acrException.getResponse(), exception); } } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. * @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. 
*/ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = null; HttpHeaders headers = listResponse.getHeaders(); if (headers != null) { String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader)) { Matcher matcher = CONTINUATION_LINK_PATTERN.matcher(continuationLinkHeader); if (matcher.matches()) { if (matcher.groupCount() == 1) { continuationLink = matcher.group(1); } } } } List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } }
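A note on the logging suggestion in this record: concatenating the exception into the message string discards the stack trace and fixes the rendering at the call site, while passing the `Throwable` as the final argument lets the logging framework attach and format it. A minimal sketch of the difference, assuming azure-core's `ClientLogger` follows the SLF4J convention of treating a trailing `Throwable` specially (the class name here is illustrative only):

```java
import com.azure.core.util.logging.ClientLogger;

final class DigestLoggingSketch {
    private static final ClientLogger LOGGER = new ClientLogger(DigestLoggingSketch.class);

    void report(Exception e) {
        // Before: the exception is flattened into the message string, so the
        // stack trace is lost and the formatting is hard-coded at the call site.
        LOGGER.error("SHA-256 conversion failed with" + e.getMessage());

        // After (the suggestion above): hand the Throwable itself to the logger
        // and let it decide how to render it, stack trace included.
        LOGGER.error("SHA-256 conversion failed.", e);
    }
}
```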
Maybe update the private constructor to take the values so that this can be a one-liner and the fields can be final #Resolved
public static DownloadManifestOptions fromTag(String tag) { DownloadManifestOptions options = new DownloadManifestOptions(); options.tag = tag; return options; }
DownloadManifestOptions options = new DownloadManifestOptions();
public static DownloadManifestOptions fromTag(String tag) { Objects.requireNonNull(tag, "tag can't be null"); return new DownloadManifestOptions(tag, null); }
class with tag. * @param tag The tag associated with the manifest. * @return The DownloadManifestOptions object. */
class with tag. * @param tag The tag associated with the manifest. * @return The DownloadManifestOptions object. */
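The "after" body in this record shows the shape the reviewer asked for: the private constructor takes the values, the factory method becomes a one-liner, and the fields can be `final`. A sketch of the resulting class under those assumptions (the `digest` field is inferred from the two-argument `new DownloadManifestOptions(tag, null)` call and is not confirmed by the record):

```java
import java.util.Objects;

public final class DownloadManifestOptions {
    // Once the private constructor takes the values, both fields can be final.
    private final String tag;
    private final String digest; // assumed second field, matching the (tag, null) call

    private DownloadManifestOptions(String tag, String digest) {
        this.tag = tag;
        this.digest = digest;
    }

    /**
     * Instantiate the options class with tag.
     * @param tag The tag associated with the manifest.
     * @return The DownloadManifestOptions object.
     */
    public static DownloadManifestOptions fromTag(String tag) {
        Objects.requireNonNull(tag, "tag can't be null");
        return new DownloadManifestOptions(tag, null);
    }

    public String getTag() {
        return tag;
    }

    public String getDigest() {
        return digest;
    }
}
```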
Why is this being changed?
void configurationPropertiesShouldBind() { String accountName = "test-account-name"; String connectionString = String.format(STORAGE_CONNECTION_STRING_PATTERN, accountName, "test-key"); String endpoint = String.format("https: String customerProvidedKey = "fakekey"; this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.blob.endpoint=" + endpoint, "spring.cloud.azure.storage.blob.account-key=test-key", "spring.cloud.azure.storage.blob.sas-token=test-sas-token", "spring.cloud.azure.storage.blob.connection-string=" + connectionString, "spring.cloud.azure.storage.blob.account-name=test-account-name", "spring.cloud.azure.storage.blob.customer-provided-key=" + customerProvidedKey, "spring.cloud.azure.storage.blob.encryption-scope=test-scope", "spring.cloud.azure.storage.blob.service-version=V2020_08_04", "spring.cloud.azure.storage.blob.container-name=test-container", "spring.cloud.azure.storage.blob.blob-name=test-blob" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(AzureStorageBlobProperties.class); AzureStorageBlobProperties properties = context.getBean(AzureStorageBlobProperties.class); assertEquals(endpoint, properties.getEndpoint()); assertEquals("test-key", properties.getAccountKey()); assertEquals("test-sas-token", properties.getSasToken()); assertEquals(connectionString, properties.getConnectionString()); assertEquals(accountName, properties.getAccountName()); assertEquals(customerProvidedKey, properties.getCustomerProvidedKey()); assertEquals("test-scope", properties.getEncryptionScope()); assertEquals(BlobServiceVersion.V2020_08_04, properties.getServiceVersion()); assertEquals("test-container", properties.getContainerName()); assertEquals("test-blob", properties.getBlobName()); }); }
String customerProvidedKey = "fakekey";
void configurationPropertiesShouldBind() { String accountName = "test-account-name"; String connectionString = String.format(STORAGE_CONNECTION_STRING_PATTERN, accountName, "test-key"); String endpoint = String.format("https: String customerProvidedKey = "fakekey"; this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.blob.endpoint=" + endpoint, "spring.cloud.azure.storage.blob.account-key=test-key", "spring.cloud.azure.storage.blob.sas-token=test-sas-token", "spring.cloud.azure.storage.blob.connection-string=" + connectionString, "spring.cloud.azure.storage.blob.account-name=test-account-name", "spring.cloud.azure.storage.blob.customer-provided-key=" + customerProvidedKey, "spring.cloud.azure.storage.blob.encryption-scope=test-scope", "spring.cloud.azure.storage.blob.service-version=V2020_08_04", "spring.cloud.azure.storage.blob.container-name=test-container", "spring.cloud.azure.storage.blob.blob-name=test-blob" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(AzureStorageBlobProperties.class); AzureStorageBlobProperties properties = context.getBean(AzureStorageBlobProperties.class); assertEquals(endpoint, properties.getEndpoint()); assertEquals("test-key", properties.getAccountKey()); assertEquals("test-sas-token", properties.getSasToken()); assertEquals(connectionString, properties.getConnectionString()); assertEquals(accountName, properties.getAccountName()); assertEquals(customerProvidedKey, properties.getCustomerProvidedKey()); assertEquals("test-scope", properties.getEncryptionScope()); assertEquals(BlobServiceVersion.V2020_08_04, properties.getServiceVersion()); assertEquals("test-container", properties.getContainerName()); assertEquals("test-blob", properties.getBlobName()); }); }
class AzureStorageBlobAutoConfigurationTests { private static final String STORAGE_CONNECTION_STRING_PATTERN = "DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=core.windows.net"; private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureStorageBlobAutoConfiguration.class)); @Test void configureWithoutBlobServiceClientBuilder() { this.contextRunner .withClassLoader(new FilteredClassLoader(BlobServiceClientBuilder.class)) .withPropertyValues("spring.cloud.azure.storage.blob.account-name=sa") .run(context -> assertThat(context).doesNotHaveBean(AzureStorageBlobAutoConfiguration.class)); } @Test void configureWithStorageBlobDisabled() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.blob.enabled=false", "spring.cloud.azure.storage.blob.account-name=sa" ) .run(context -> assertThat(context).doesNotHaveBean(AzureStorageBlobAutoConfiguration.class)); } @Test void accountNameSetShouldConfigure() { this.contextRunner .withPropertyValues("spring.cloud.azure.storage.blob.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(AzureStorageBlobAutoConfiguration.class); assertThat(context).hasSingleBean(AzureStorageBlobProperties.class); assertThat(context).hasSingleBean(BlobServiceClient.class); assertThat(context).hasSingleBean(BlobServiceAsyncClient.class); assertThat(context).hasSingleBean(BlobServiceClientBuilder.class); assertThat(context).hasSingleBean(BlobServiceClientBuilderFactory.class); }); } @Test void containerNameSetShouldConfigureContainerClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.blob.account-name=sa", "spring.cloud.azure.storage.blob.container-name=container1" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(BlobContainerClient.class); assertThat(context).hasSingleBean(BlobContainerAsyncClient.class); }); } @Test void containerNameNotSetShouldNotConfigureContainerClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.blob.account-name=sa" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).doesNotHaveBean(BlobContainerClient.class); assertThat(context).doesNotHaveBean(BlobContainerAsyncClient.class); }); } @Test void customizerShouldBeCalled() { BlobServiceClientBuilderCustomizer customizer = new BlobServiceClientBuilderCustomizer(); this.contextRunner .withPropertyValues("spring.cloud.azure.storage.blob.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", BlobServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", BlobServiceClientBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { BlobServiceClientBuilderCustomizer customizer = new BlobServiceClientBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues("spring.cloud.azure.storage.blob.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", BlobServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", BlobServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer3", 
OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } @Test private static class BlobServiceClientBuilderCustomizer extends TestBuilderCustomizer<BlobServiceClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
class AzureStorageBlobAutoConfigurationTests { private static final String STORAGE_CONNECTION_STRING_PATTERN = "DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=core.windows.net"; private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureStorageBlobAutoConfiguration.class)); @Test void configureWithoutBlobServiceClientBuilder() { this.contextRunner .withClassLoader(new FilteredClassLoader(BlobServiceClientBuilder.class)) .withPropertyValues("spring.cloud.azure.storage.blob.account-name=sa") .run(context -> assertThat(context).doesNotHaveBean(AzureStorageBlobAutoConfiguration.class)); } @Test void configureWithStorageBlobDisabled() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.blob.enabled=false", "spring.cloud.azure.storage.blob.account-name=sa" ) .run(context -> assertThat(context).doesNotHaveBean(AzureStorageBlobAutoConfiguration.class)); } @Test void accountNameSetShouldConfigure() { this.contextRunner .withPropertyValues("spring.cloud.azure.storage.blob.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(AzureStorageBlobAutoConfiguration.class); assertThat(context).hasSingleBean(AzureStorageBlobProperties.class); assertThat(context).hasSingleBean(BlobServiceClient.class); assertThat(context).hasSingleBean(BlobServiceAsyncClient.class); assertThat(context).hasSingleBean(BlobServiceClientBuilder.class); assertThat(context).hasSingleBean(BlobServiceClientBuilderFactory.class); }); } @Test void containerNameSetShouldConfigureContainerClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.blob.account-name=sa", "spring.cloud.azure.storage.blob.container-name=container1" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(BlobContainerClient.class); assertThat(context).hasSingleBean(BlobContainerAsyncClient.class); }); } @Test void containerNameNotSetShouldNotConfigureContainerClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.blob.account-name=sa" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).doesNotHaveBean(BlobContainerClient.class); assertThat(context).doesNotHaveBean(BlobContainerAsyncClient.class); }); } @Test void customizerShouldBeCalled() { BlobServiceClientBuilderCustomizer customizer = new BlobServiceClientBuilderCustomizer(); this.contextRunner .withPropertyValues("spring.cloud.azure.storage.blob.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", BlobServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", BlobServiceClientBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { BlobServiceClientBuilderCustomizer customizer = new BlobServiceClientBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues("spring.cloud.azure.storage.blob.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", BlobServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", BlobServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer3", 
OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } @Test private static class BlobServiceClientBuilderCustomizer extends TestBuilderCustomizer<BlobServiceClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
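For context on what the test in this record exercises: `ApplicationContextRunner.withPropertyValues(...)` pushes the `spring.cloud.azure.storage.blob.*` keys through Spring Boot's relaxed binding into `AzureStorageBlobProperties`, so the kebab-case property names map onto the camelCase getters asserted at the end. A stripped-down sketch of the same binding check, using only bean and property names that appear in the record (imports for the Azure classes omitted):

```java
// Inside a JUnit test; AzureStorageBlobAutoConfiguration, AzureGlobalProperties and
// AzureStorageBlobProperties are the classes already used by the record above.
new ApplicationContextRunner()
    .withConfiguration(AutoConfigurations.of(AzureStorageBlobAutoConfiguration.class))
    .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
    // Relaxed binding maps the kebab-case key onto the camelCase getter:
    // customer-provided-key -> getCustomerProvidedKey()
    .withPropertyValues("spring.cloud.azure.storage.blob.customer-provided-key=fakekey")
    .run(context -> assertEquals("fakekey",
        context.getBean(AzureStorageBlobProperties.class).getCustomerProvidedKey()));
```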
`SimpleResponse` may be a better option here than `ResponseBase` #Resolved
Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<DownloadManifestResult> res = new ResponseBase<Void, DownloadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new DownloadManifestResult(digest, ociManifest, BinaryData.fromObject(ociManifest)), null); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); }
Response<DownloadManifestResult> res = new ResponseBase<Void, DownloadManifestResult>(
return monoError(logger, new NullPointerException("'tagOrDigest' can't be null.")); } return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<DownloadManifestResult> res = new SimpleResponse<>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new DownloadManifestResult(digest, ociManifest, BinaryData.fromObject(ociManifest))); return Mono.just(res); } else { return monoError(logger, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest)); } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @param options The options for the upload manifest operation. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest, UploadManifestOptions options) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return uploadManifest(BinaryData.fromObject(manifest), options); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), null, context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param data The manifest that needs to be uploaded. * @param options The options for the upload manifest operation. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(BinaryData data, UploadManifestOptions options) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), options, context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param data The manifest that needs to be uploaded. * @param options The options for the upload manifest operation. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(BinaryData data, UploadManifestOptions options) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(data.toByteBuffer(), options, context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(ByteBuffer data, UploadManifestOptions options, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String tagOrDigest = null; if (options != null) { tagOrDigest = options.getTag(); } if (tagOrDigest == null) { tagOrDigest = UtilsImpl.computeDigest(data); } return this.registriesImpl.createManifestWithResponseAsync( repositoryName, tagOrDigest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. 
* <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadManifestResult> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { ).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new ResponseBase<HttpHeaders, DownloadBlobResult>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData), null); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final ClientLogger logger = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(logger, new NullPointerException("'manifest' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(new UploadManifestOptions(manifest), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param options The options for the upload manifest operation. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(UploadManifestOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param options The options for the upload manifest operation. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. 
* @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)); } Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options, Context context) { if (options == null) { return monoError(logger, new NullPointerException("'options' can't be null.")); } ByteBuffer data = options.getManifest().toByteBuffer(); String tagOrDigest = options.getTag() != null ? options.getTag() : UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, tagOrDigest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(logger, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } private String trimNextLink(String locationHeader) { if (locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadManifestResult> downloadManifest(String tagOrDigest) { return this.downloadManifestWithResponse(tagOrDigest).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param tagOrDigest The tag or digest of the manifest. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(String tagOrDigest) { return withContext(context -> this.downloadManifestWithResponse(tagOrDigest, context)); } Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(String tagOrDigest, Context context) { if (tagOrDigest == null) { ).onErrorMap(UtilsImpl::mapException); } /** * Download the blob associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadBlobResult> downloadBlob(String digest) { return this.downloadBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest) { return withContext(context -> this.downloadBlobWithResponse(digest, context)); } Mono<Response<DownloadBlobResult>> downloadBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.getBlobWithResponseAsync(repositoryName, digest, context).flatMap(streamResponse -> { String resDigest = streamResponse.getHeaders().getValue(UtilsImpl.DOCKER_DIGEST_HEADER_NAME); return BinaryData.fromFlux(streamResponse.getValue()) .flatMap(binaryData -> { Response<DownloadBlobResult> response = new SimpleResponse<>( streamResponse.getRequest(), streamResponse.getStatusCode(), streamResponse.getHeaders(), new DownloadBlobResult(resDigest, binaryData)); return Mono.just(response); }); }).onErrorMap(UtilsImpl::mapException); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(logger, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. 
* We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(UtilsImpl::deleteResponseToSuccess) .onErrorMap(UtilsImpl::mapException); } }
nit: Is this going to be commonly called? If so, can we use string concatenation instead of formatting, since it is more readable and more performant (or at least it should be)?
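To make the trade-off concrete, here is a stand-alone sketch of the two message-building styles discussed in this record. The `EndpointState` enum below is a stand-in for the real AMQP type, and the performance notes describe typical JVM behavior, not a measurement:

```java
public class MessageBuildingSketch {
    enum EndpointState { ACTIVE, CLOSED }

    // String.format parses the pattern at runtime and allocates a varargs array.
    static String viaFormat(EndpointState send, EndpointState receive) {
        return String.format(
            "Cannot send a message when request response channel is disposed. LinkState: (%s, %s)",
            send, receive);
    }

    // Plain concatenation compiles to a StringBuilder chain (or an invokedynamic
    // StringConcatFactory call on Java 9+), skipping the format-string parsing.
    static String viaConcat(EndpointState send, EndpointState receive) {
        return "Cannot send a message when request response channel is disposed. LinkState: ("
            + send + ", " + receive + ")";
    }

    public static void main(String[] args) {
        System.out.println(viaFormat(EndpointState.CLOSED, EndpointState.ACTIVE));
        System.out.println(viaConcat(EndpointState.CLOSED, EndpointState.ACTIVE));
    }
}
```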
public RequestResponseChannelClosedException(EndpointState sendLinkState, EndpointState receiveLinkState) { super(String.format("Cannot send a message when request response channel is disposed. LinkState: (%s, %s)", sendLinkState, receiveLinkState)); }
super(String.format("Cannot send a message when request response channel is disposed. LinkState: (%s, %s)",
public RequestResponseChannelClosedException(EndpointState sendLinkState, EndpointState receiveLinkState) { super("Cannot send a message when request response channel is disposed. LinkState: (" + sendLinkState + "," + receiveLinkState + ")"); }
class RequestResponseChannelClosedException extends IllegalStateException { public RequestResponseChannelClosedException() { super("Cannot send a message when request response channel is disposed."); } }
class RequestResponseChannelClosedException extends IllegalStateException { public RequestResponseChannelClosedException() { super("Cannot send a message when request response channel is disposed."); } }
I think the logic changed here when you replaced the shorthand if with the `getHttpClient` function.
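A sketch of the suspected behavioral difference, assuming the pre-refactor shorthand only null-checked the supplied client (that original line is not shown in this record, so `selectBefore` is a hypothetical reconstruction; `interceptorManager` and `getTestMode()` are inherited from `com.azure.core.test.TestBase`):

```java
import com.azure.core.http.HttpClient;
import com.azure.core.test.TestBase;
import com.azure.core.test.TestMode;

class HttpClientSelectionSketch extends TestBase {

    // Hypothetical reconstruction of the pre-refactor inline shorthand:
    // playback is used only when no client was supplied.
    HttpClient selectBefore(HttpClient httpClient) {
        return httpClient == null ? interceptorManager.getPlaybackClient() : httpClient;
    }

    // The extracted helper shown just below in this record: playback is also
    // forced whenever the test runs in PLAYBACK mode, even if a real client
    // was passed in.
    HttpClient selectAfter(HttpClient httpClient) {
        if (httpClient == null || getTestMode() == TestMode.PLAYBACK) {
            return interceptorManager.getPlaybackClient();
        }
        return httpClient;
    }
}
```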
protected PhoneNumbersClientBuilder getClientBuilderUsingManagedIdentity(HttpClient httpClient) { PhoneNumbersClientBuilder builder = new PhoneNumbersClientBuilder(); builder .httpClient(getHttpClient(httpClient)) .addPolicy(getOverrideMSUseragentPolicy()) .endpoint(new CommunicationConnectionString(CONNECTION_STRING).getEndpoint()); if (getTestMode() == TestMode.PLAYBACK) { builder.credential(new FakeCredentials()); } else { builder.credential(new DefaultAzureCredentialBuilder().build()); } if (shouldRecord()) { builder.addPolicy(getRecordPolicy()); } return builder; }
builder.credential(new FakeCredentials());
protected PhoneNumbersClientBuilder getClientBuilderUsingManagedIdentity(HttpClient httpClient) { PhoneNumbersClientBuilder builder = new PhoneNumbersClientBuilder(); builder .httpClient(getHttpClient(httpClient)) .addPolicy(getOverrideMSUserAgentPolicy()) .endpoint(new CommunicationConnectionString(CONNECTION_STRING).getEndpoint()); if (getTestMode() == TestMode.PLAYBACK) { builder.credential(new FakeCredentials()); } else { builder.credential(new DefaultAzureCredentialBuilder().build()); } if (shouldRecord()) { builder.addPolicy(getRecordPolicy()); } return builder; }
class PhoneNumbersIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_STATIC_CONNECTION_STRING", "endpoint=https: protected static final String COUNTRY_CODE = Configuration.getGlobalConfiguration().get("COUNTRY_CODE", "US"); protected static final String AREA_CODE = Configuration.getGlobalConfiguration().get("AREA_CODE", "833"); protected static final String MS_USERAGENT_OVERRIDE = Configuration.getGlobalConfiguration().get("AZURE_USERAGENT_OVERRIDE", ""); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
class PhoneNumbersIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_STATIC_CONNECTION_STRING", "endpoint=https: protected static final String COUNTRY_CODE = Configuration.getGlobalConfiguration().get("COUNTRY_CODE", "US"); protected static final String AREA_CODE = Configuration.getGlobalConfiguration().get("AREA_CODE", "833"); protected static final String MS_USERAGENT_OVERRIDE = Configuration.getGlobalConfiguration().get("AZURE_USERAGENT_OVERRIDE", ""); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
Why not use `MS_USERAGENT_OVERRIDE.isEmpty()`?
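The only semantic difference between the two spellings is null handling, which a tiny sketch makes explicit (plain Java, no Azure types involved):

```java
public class EmptyCheckSketch {
    public static void main(String[] args) {
        String set = "my-agent";
        String missing = null;

        // "".equals(x) is null-safe: it simply returns false for null.
        System.out.println("".equals(""));      // true
        System.out.println("".equals(missing)); // false

        // x.isEmpty() reads more directly but would throw NullPointerException
        // on null. Here it is safe: MS_USERAGENT_OVERRIDE is looked up with a
        // default of "", so it can never be null.
        System.out.println(set.isEmpty());      // false
    }
}
```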
private HttpPipelinePolicy getOverrideMSUserAgentPolicy() { HttpHeaders headers = new HttpHeaders(); if (!"".equals(MS_USERAGENT_OVERRIDE)) { headers.add("x-ms-useragent", MS_USERAGENT_OVERRIDE); } return new AddHeadersPolicy(headers); }
if (!"".equals(MS_USERAGENT_OVERRIDE)) {
private HttpPipelinePolicy getOverrideMSUserAgentPolicy() { HttpHeaders headers = new HttpHeaders(); if (!MS_USERAGENT_OVERRIDE.isEmpty()) { headers.add("x-ms-useragent", MS_USERAGENT_OVERRIDE); } return new AddHeadersPolicy(headers); }
class PhoneNumbersIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_STATIC_CONNECTION_STRING", "endpoint=https: protected static final String COUNTRY_CODE = Configuration.getGlobalConfiguration().get("COUNTRY_CODE", "US"); protected static final String AREA_CODE = Configuration.getGlobalConfiguration().get("AREA_CODE", "833"); protected static final String MS_USERAGENT_OVERRIDE = Configuration.getGlobalConfiguration().get("AZURE_USERAGENT_OVERRIDE", ""); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
class PhoneNumbersIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_STATIC_CONNECTION_STRING", "endpoint=https: protected static final String COUNTRY_CODE = Configuration.getGlobalConfiguration().get("COUNTRY_CODE", "US"); protected static final String AREA_CODE = Configuration.getGlobalConfiguration().get("AREA_CODE", "833"); protected static final String MS_USERAGENT_OVERRIDE = Configuration.getGlobalConfiguration().get("AZURE_USERAGENT_OVERRIDE", ""); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
I see that `getClientBuilderWithConnectionString` used to check whether it is playback mode, while `getClientBuilderUsingManagedIdentity` checked for null, but neither checked for both. Is there a reason for generalizing this?
private HttpClient getHttpClient(HttpClient httpClient) { if (httpClient == null || getTestMode() == TestMode.PLAYBACK) { return interceptorManager.getPlaybackClient(); } return httpClient; }
if (httpClient == null || getTestMode() == TestMode.PLAYBACK) {
private HttpClient getHttpClient(HttpClient httpClient) { if (httpClient == null || getTestMode() == TestMode.PLAYBACK) { return interceptorManager.getPlaybackClient(); } return httpClient; }
class PhoneNumbersIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_STATIC_CONNECTION_STRING", "endpoint=https: protected static final String COUNTRY_CODE = Configuration.getGlobalConfiguration().get("COUNTRY_CODE", "US"); protected static final String AREA_CODE = Configuration.getGlobalConfiguration().get("AREA_CODE", "833"); protected static final String MS_USERAGENT_OVERRIDE = Configuration.getGlobalConfiguration().get("AZURE_USERAGENT_OVERRIDE", ""); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
class PhoneNumbersIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_STATIC_CONNECTION_STRING", "endpoint=https: protected static final String COUNTRY_CODE = Configuration.getGlobalConfiguration().get("COUNTRY_CODE", "US"); protected static final String AREA_CODE = Configuration.getGlobalConfiguration().get("AREA_CODE", "833"); protected static final String MS_USERAGENT_OVERRIDE = Configuration.getGlobalConfiguration().get("AZURE_USERAGENT_OVERRIDE", ""); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
Nice abstractions!
private HttpPipelinePolicy getRecordPolicy() { List<Function<String, String>> redactors = new ArrayList<>(); redactors.add(data -> redact(data, JSON_PROPERTY_VALUE_REDACTION_PATTERN.matcher(data), "REDACTED")); return interceptorManager.getRecordPolicy(redactors); }
}
private HttpPipelinePolicy getRecordPolicy() { List<Function<String, String>> redactors = new ArrayList<>(); redactors.add(data -> redact(data, JSON_PROPERTY_VALUE_REDACTION_PATTERN.matcher(data), "REDACTED")); return interceptorManager.getRecordPolicy(redactors); }
class PhoneNumbersIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_STATIC_CONNECTION_STRING", "endpoint=https: protected static final String COUNTRY_CODE = Configuration.getGlobalConfiguration().get("COUNTRY_CODE", "US"); protected static final String AREA_CODE = Configuration.getGlobalConfiguration().get("AREA_CODE", "833"); protected static final String MS_USERAGENT_OVERRIDE = Configuration.getGlobalConfiguration().get("AZURE_USERAGENT_OVERRIDE", ""); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
class PhoneNumbersIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_STATIC_CONNECTION_STRING", "endpoint=https: protected static final String COUNTRY_CODE = Configuration.getGlobalConfiguration().get("COUNTRY_CODE", "US"); protected static final String AREA_CODE = Configuration.getGlobalConfiguration().get("AREA_CODE", "833"); protected static final String MS_USERAGENT_OVERRIDE = Configuration.getGlobalConfiguration().get("AZURE_USERAGENT_OVERRIDE", ""); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
👍
protected void doStop() { this.delegate.stop(); }
this.delegate.stop();
protected void doStop() { this.delegate.stop(); }
class ServiceBusMessageListenerContainer extends AbstractMessageListenerContainer { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageListenerContainer.class); private final ServiceBusProcessorFactory processorFactory; private final ServiceBusContainerProperties containerProperties; private ServiceBusErrorHandler errorHandler; private ServiceBusProcessorClient delegate; /** * Create an instance using the supplied processor factory and container properties. * * @param processorFactory the processor factory. * @param containerProperties the container properties. */ public ServiceBusMessageListenerContainer(ServiceBusProcessorFactory processorFactory, ServiceBusContainerProperties containerProperties) { this.processorFactory = processorFactory; this.containerProperties = containerProperties == null ? new ServiceBusContainerProperties() : containerProperties; } @Override protected void doStart() { String entityName = containerProperties.getEntityName(); String subscriptionName = containerProperties.getSubscriptionName(); if (this.errorHandler != null) { this.containerProperties.setErrorHandler(errorHandler); } if (StringUtils.hasText(subscriptionName)) { this.delegate = this.processorFactory.createProcessor(entityName, subscriptionName, containerProperties); } else { this.delegate = this.processorFactory.createProcessor(entityName, containerProperties); } this.delegate.start(); } @Override @Override public void setupMessageListener(Object messageListener) { this.containerProperties.setMessageListener((ServiceBusMessageListener) messageListener); } @Override public ServiceBusContainerProperties getContainerProperties() { return containerProperties; } /** * Set the error handler to call when the listener throws an exception. * @param errorHandler the error handler. */ public void setErrorHandler(ServiceBusErrorHandler errorHandler) { this.errorHandler = errorHandler; } }
class ServiceBusMessageListenerContainer extends AbstractMessageListenerContainer { private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageListenerContainer.class); private final ServiceBusProcessorFactory processorFactory; private final ServiceBusContainerProperties containerProperties; private ServiceBusErrorHandler errorHandler; private ServiceBusProcessorClient delegate; /** * Create an instance using the supplied processor factory and container properties. * * @param processorFactory the processor factory. * @param containerProperties the container properties. */ public ServiceBusMessageListenerContainer(ServiceBusProcessorFactory processorFactory, ServiceBusContainerProperties containerProperties) { this.processorFactory = processorFactory; this.containerProperties = containerProperties == null ? new ServiceBusContainerProperties() : containerProperties; } @Override protected void doStart() { String entityName = containerProperties.getEntityName(); String subscriptionName = containerProperties.getSubscriptionName(); if (this.errorHandler != null) { this.containerProperties.setErrorHandler(errorHandler); } if (StringUtils.hasText(subscriptionName)) { this.delegate = this.processorFactory.createProcessor(entityName, subscriptionName, containerProperties); } else { this.delegate = this.processorFactory.createProcessor(entityName, containerProperties); } this.delegate.start(); } @Override @Override public void setupMessageListener(Object messageListener) { this.containerProperties.setMessageListener((ServiceBusMessageListener) messageListener); } @Override public ServiceBusContainerProperties getContainerProperties() { return containerProperties; } /** * Set the error handler to call when the listener throws an exception. * @param errorHandler the error handler. */ public void setErrorHandler(ServiceBusErrorHandler errorHandler) { this.errorHandler = errorHandler; } }
Is it possible to use a smaller VM size for this test (D8 seems to be 8-core?), or is this already the smallest?
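For reference, both size constants already exist in the SDK, so the swap in the fix below is a one-liner: from the 8-vCPU `STANDARD_D8S_V3` to `STANDARD_DS1_V2`. One caveat, which is my assumption rather than anything stated in this record: with `DiffDiskPlacement.CACHE_DISK` the ephemeral OS disk lives in the VM size's cache, so the cache must be large enough to hold the OS image and the very smallest sizes may not qualify.

```java
import com.azure.resourcemanager.compute.models.VirtualMachineSizeTypes;

public class EphemeralOsDiskSizeSketch {
    public static void main(String[] args) {
        // Size used before the fix (8 vCPUs) and the smaller one chosen after it.
        VirtualMachineSizeTypes before = VirtualMachineSizeTypes.STANDARD_D8S_V3;
        VirtualMachineSizeTypes after = VirtualMachineSizeTypes.STANDARD_DS1_V2;
        System.out.println("before: " + before + ", after: " + after);
    }
}
```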
public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_D8S_V3) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.osDiskIsEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); }
.withSize(VirtualMachineSizeTypes.STANDARD_D8S_V3)
public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); }
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = 
computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? 
defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); 
Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); 
Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." + " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, 
PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = (PublicIpAddress) 
createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) 
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = 
computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? 
defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); 
Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); 
Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." + " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, 
PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = (PublicIpAddress) 
createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) 
.storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); 
Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST2; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 
= computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST2; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) 
.withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() 
.withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
I think you can return dataLakeFileAsyncClient.createIfNotExistsWithResponse instead; then you don't have to duplicate the already-exists check here (see the sketch after the method body below).
public Mono<Response<DataLakeFileAsyncClient>> createFileIfNotExistsWithResponse(String fileName, String permissions,
    String umask, PathHttpHeaders headers, Map<String, String> metadata, Context context) {
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
        .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    DataLakeFileAsyncClient dataLakeFileAsyncClient = getFileAsyncClient(fileName);
    return dataLakeFileAsyncClient.createWithResponse(permissions, umask, pathResourceType, headers, metadata,
            requestConditions, context)
        .onErrorResume(t -> t instanceof DataLakeStorageException
            && ((DataLakeStorageException) t).getStatusCode() == 409, t -> Mono.empty())
        .map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient));
}
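A minimal sketch of the suggested simplification, assuming DataLakeFileAsyncClient exposes a createIfNotExistsWithResponse overload with matching parameters; the exact signature below is hypothetical and should be checked against the file client:

// Sketch of the reviewer's suggestion; the createIfNotExistsWithResponse overload used
// here is assumed, not confirmed. Delegating to the file client avoids re-implementing
// the If-None-Match precondition and the 409 "already exists" handling in this class.
public Mono<Response<DataLakeFileAsyncClient>> createFileIfNotExistsWithResponse(String fileName, String permissions,
    String umask, PathHttpHeaders headers, Map<String, String> metadata, Context context) {
    DataLakeFileAsyncClient dataLakeFileAsyncClient = getFileAsyncClient(fileName);
    return dataLakeFileAsyncClient
        .createIfNotExistsWithResponse(permissions, umask, headers, metadata, context) // assumed overload
        .map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient));
}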
return dataLakeFileAsyncClient.createWithResponse(permissions, umask, pathResourceType,
new DataLakeRequestConditions(); if (!overwrite) { requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); }
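For context, this fragment is the overwrite guard belonging to createFile(String fileName, boolean overwrite) in the class below; reassembled with its surrounding body (a reconstruction from that code, not verbatim SDK source) it reads:

// Reconstruction: the guard above slots into createFile(String, boolean) like this.
public Mono<DataLakeFileAsyncClient> createFile(String fileName, boolean overwrite) {
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // If-None-Match: "*" makes the service reject the create when the file already
        // exists, instead of silently overwriting it.
        requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    try {
        return createFileWithResponse(fileName, null, null, null, null, requestConditions)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}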
class DataLakeDirectoryAsyncClient extends DataLakePathAsyncClient { private final ClientLogger logger = new ClientLogger(DataLakeDirectoryAsyncClient.class); /** * Package-private constructor for use by {@link DataLakePathClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param fileSystemName The file system name. * @param directoryName The directory name. * @param blockBlobAsyncClient The underlying {@link BlobContainerAsyncClient} */ DataLakeDirectoryAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName, String fileSystemName, String directoryName, BlockBlobAsyncClient blockBlobAsyncClient) { super(pipeline, url, serviceVersion, accountName, fileSystemName, directoryName, PathResourceType.DIRECTORY, blockBlobAsyncClient); } DataLakeDirectoryAsyncClient(DataLakePathAsyncClient dataLakePathAsyncClient) { super(dataLakePathAsyncClient.getHttpPipeline(), dataLakePathAsyncClient.getAccountUrl(), dataLakePathAsyncClient.getServiceVersion(), dataLakePathAsyncClient.getAccountName(), dataLakePathAsyncClient.getFileSystemName(), Utility.urlEncode(dataLakePathAsyncClient.pathName), PathResourceType.DIRECTORY, dataLakePathAsyncClient.getBlockBlobAsyncClient()); } /** * Gets the URL of the directory represented by this client on the Data Lake service. * * @return the URL. */ public String getDirectoryUrl() { return getPathUrl(); } /** * Gets the path of this directory, not including the name of the resource itself. * * @return The path of the directory. */ public String getDirectoryPath() { return getObjectPath(); } /** * Gets the name of this directory, not including its full path. * * @return The name of the directory. */ public String getDirectoryName() { return getObjectName(); } /** * Deletes a directory. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete --> * <pre> * client.delete& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete --> * * <p>For more information see the * <a href="https: * Docs</a></p> * * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> delete() { try { return deleteWithResponse(false, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a directory. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * boolean recursive = false; & * * client.deleteWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param recursive Whether or not to delete all paths beneath the directory. * @param requestConditions {@link DataLakeRequestConditions} * * @return A reactive response signalling completion. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteWithResponse(boolean recursive, DataLakeRequestConditions requestConditions) { try { return withContext(context -> deleteWithResponse(recursive, requestConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new DataLakeFileAsyncClient object by concatenating fileName to the end of * DataLakeDirectoryAsyncClient's URL. The new DataLakeFileAsyncClient uses the same request policy pipeline as the * DataLakeDirectoryAsyncClient. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient * <pre> * DataLakeFileAsyncClient dataLakeFileClient = client.getFileAsyncClient& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient * * @param fileName A {@code String} representing the name of the file. * @return A new {@link DataLakeFileAsyncClient} object which references the file with the specified name in this * file system. */ public DataLakeFileAsyncClient getFileAsyncClient(String fileName) { Objects.requireNonNull(fileName, "'fileName' can not be set to null"); BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(fileName).buildBlockBlobAsyncClient(); String pathPrefix = getObjectPath().isEmpty() ? "" : getObjectPath() + "/"; return new DataLakeFileAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getFileSystemName(), Utility.urlEncode(pathPrefix + Utility.urlDecode(fileName)), blockBlobAsyncClient); } /** * Creates a new file within a directory. By default this method will not overwrite an existing file. * For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * <pre> * DataLakeFileAsyncClient fileClient = client.createFile& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * * @param fileName Name of the file to create. * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeFileAsyncClient> createFile(String fileName) { return createFile(fileName, false); } /** * Creates a new file within a directory. For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * <pre> * boolean overwrite = false; & * DataLakeFileAsyncClient fClient = client.createFile& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * * @param fileName Name of the file to create. * @param overwrite Whether or not to overwrite, should the file exist. * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeFileAsyncClient> createFile(String fileName, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } try { return createFileWithResponse(fileName, null, null, null, null, requestConditions) .flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file within a directory. If a file with the same name already exists, the file will be * overwritten. 
For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse * <pre> * PathHttpHeaders httpHeaders = new PathHttpHeaders& * .setContentLanguage& * .setContentType& * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * String permissions = &quot;permissions&quot;; * String umask = &quot;umask&quot;; * DataLakeFileAsyncClient newFileClient = client.createFileWithResponse& * permissions, umask, httpHeaders, Collections.singletonMap& * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse * * @param fileName Name of the file to create. * @param permissions POSIX access permissions for the file owner, the file owning group, and others. * @param umask Restricts permissions of the file to be created. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the file. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeFileAsyncClient} used to interact with the file created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeFileAsyncClient>> createFileWithResponse(String fileName, String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { try { DataLakeFileAsyncClient dataLakeFileAsyncClient = getFileAsyncClient(fileName); return dataLakeFileAsyncClient.createWithResponse(permissions, umask, headers, metadata, requestConditions) .map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient)); } catch (RuntimeException ex) { return monoError(logger, ex); } } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeFileAsyncClient> createFileIfNotExists(String fileName) { return createFileIfNotExistsWithResponse(fileName, null, null, null, null).flatMap(FluxUtil::toMono); } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeFileAsyncClient>> createFileIfNotExistsWithResponse(String fileName, String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata) { return createFileIfNotExistsWithResponse(fileName, permissions, umask, headers, metadata, null); } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeFileAsyncClient>> createFileIfNotExistsWithResponse(String fileName, String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata, Context context) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); DataLakeFileAsyncClient dataLakeFileAsyncClient = getFileAsyncClient(fileName); return dataLakeFileAsyncClient.createWithResponse(permissions, umask, pathResourceType, headers, metadata, requestConditions, context).onErrorResume(t -> t instanceof DataLakeStorageException && ((DataLakeStorageException) t).getStatusCode() == 409, t -> Mono.empty()) .map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient)); } /* Mono<Response<AppendBlobItem>> createIfNotExistsWithResponse(AppendBlobCreateOptions options, Context context) { options.setRequestConditions(new 
AppendBlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD)); return createWithResponse(options, context).onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 409, t -> Mono.empty()); } .onErrorResume(t -> t instanceof DataLakeStorageException && ((DataLakeStorageException) t).getStatusCode() == 409, */ /** * Deletes the specified file in the file system. If the file doesn't exist the operation fails. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile * <pre> * client.deleteFile& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile * * @param fileName Name of the file to delete. * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteFile(String fileName) { try { return deleteFileWithResponse(fileName, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified file in the directory. If the file doesn't exist the operation fails. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * * client.deleteFileWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse * * @param fileName Name of the file to delete. * @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing status code and HTTP headers */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteFileWithResponse(String fileName, DataLakeRequestConditions requestConditions) { try { return getFileAsyncClient(fileName).deleteWithResponse(requestConditions); } catch (RuntimeException ex) { return monoError(logger, ex); } } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteFileIfExists(String fileName) { try { return deleteFileIfExistsWithResponse(fileName, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteFileIfExistsWithResponse(String fileName, DataLakeRequestConditions requestConditions) { return deleteFileIfExistsWithResponse(fileName, requestConditions, null); } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteFileIfExistsWithResponse(String fileName, DataLakeRequestConditions requestConditions, Context context) { return getFileAsyncClient(fileName).deleteWithResponse(null, requestConditions, context) .onErrorResume(t -> t instanceof DataLakeStorageException && ((DataLakeStorageException)t).getStatusCode() == 404, t -> Mono.empty()); } /** * Creates a new DataLakeDirectoryAsyncClient object by concatenating subdirectoryName to the end of * DataLakeDirectoryAsyncClient's URL. The new DataLakeDirectoryAsyncClient uses the same request policy pipeline * as the DataLakeDirectoryAsyncClient. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubdirectoryAsyncClient * <pre> * DataLakeDirectoryAsyncClient dataLakeDirectoryClient = client.getSubdirectoryAsyncClient& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubdirectoryAsyncClient * * @param subdirectoryName A {@code String} representing the name of the sub-directory. * @return A new {@link DataLakeDirectoryAsyncClient} object which references the directory with the specified name * in this file system. */ public DataLakeDirectoryAsyncClient getSubdirectoryAsyncClient(String subdirectoryName) { Objects.requireNonNull(subdirectoryName, "'subdirectoryName' can not be set to null"); BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(subdirectoryName) .buildBlockBlobAsyncClient(); String pathPrefix = getObjectPath().isEmpty() ? "" : getObjectPath() + "/"; return new DataLakeDirectoryAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getFileSystemName(), Utility.urlEncode(pathPrefix + Utility.urlDecode(subdirectoryName)), blockBlobAsyncClient); } /** * Creates a new sub-directory within a directory. By default this method will not overwrite an existing * sub-directory. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * <pre> * DataLakeDirectoryAsyncClient directoryClient = client.createSubdirectory& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * * @param subdirectoryName Name of the sub-directory to create. * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory * created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName) { return createSubdirectory(subdirectoryName, false); } /** * Creates a new sub-directory within a directory. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * <pre> * boolean overwrite = false; & * DataLakeDirectoryAsyncClient dClient = client.createSubdirectory& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * * @param subdirectoryName Name of the sub-directory to create. * @param overwrite Whether or not to overwrite, should the sub directory exist. * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory * created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } try { return createSubdirectoryWithResponse(subdirectoryName, null, null, null, null, requestConditions) .flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new sub-directory within a directory. If a sub-directory with the same name already exists, the * sub-directory will be overwritten. 
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryWithResponse * <pre> * PathHttpHeaders httpHeaders = new PathHttpHeaders& * .setContentLanguage& * .setContentType& * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * String permissions = &quot;permissions&quot;; * String umask = &quot;umask&quot;; * DataLakeDirectoryAsyncClient newDirectoryClient = client.createSubdirectoryWithResponse& * directoryName, permissions, umask, httpHeaders, Collections.singletonMap& * requestConditions * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryWithResponse * * @param subdirectoryName Name of the sub-directory to create. * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and * others. * @param umask Restricts permissions of the sub-directory to be created. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeDirectoryAsyncClient} used to interact with the sub-directory created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeDirectoryAsyncClient>> createSubdirectoryWithResponse(String subdirectoryName, String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { try { DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient = getSubdirectoryAsyncClient(subdirectoryName); return dataLakeDirectoryAsyncClient.createWithResponse(permissions, umask, headers, metadata, requestConditions).map(response -> new SimpleResponse<>(response, dataLakeDirectoryAsyncClient)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified sub-directory in the directory. If the sub-directory doesn't exist or is not empty the * operation fails. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectory * <pre> * client.deleteSubdirectory& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectory * * @param subdirectoryName Name of the sub-directory to delete. * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubdirectory(String subdirectoryName) { try { return deleteSubdirectoryWithResponse(subdirectoryName, false, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified sub-directory in the directory. If the sub-directory doesn't exist or is not empty the * operation fails. * For more information see the <a href="https: * Docs</a>. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * boolean recursive = false; & * * client.deleteSubdirectoryWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryWithResponse * * @param directoryName Name of the sub-directory to delete. * @param recursive Whether or not to delete all paths beneath the sub-directory. * @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing status code and HTTP headers */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubdirectoryWithResponse(String directoryName, boolean recursive, DataLakeRequestConditions requestConditions) { try { return getSubdirectoryAsyncClient(directoryName).deleteWithResponse(recursive, requestConditions); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Moves the directory to another location within the file system. * For more information see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename * <pre> * DataLakeDirectoryAsyncClient renamedClient = client.rename& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system * name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir" * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the new directory * created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeDirectoryAsyncClient> rename(String destinationFileSystem, String destinationPath) { try { return renameWithResponse(destinationFileSystem, destinationPath, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Moves the directory to another location within the file system. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.renameWithResponse * <pre> * DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions& * .setLeaseId& * DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions& * * DataLakeDirectoryAsyncClient newRenamedClient = client.renameWithResponse& * sourceRequestConditions, destinationRequestConditions& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.renameWithResponse * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system * name. 
For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir" * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source. * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination. * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeDirectoryAsyncClient} used to interact with the directory created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeDirectoryAsyncClient>> renameWithResponse(String destinationFileSystem, String destinationPath, DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions) { try { return withContext(context -> renameWithResponse(destinationFileSystem, destinationPath, sourceRequestConditions, destinationRequestConditions, context)).map( response -> new SimpleResponse<>(response, new DataLakeDirectoryAsyncClient(response.getValue()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Returns a reactive Publisher emitting all the files/directories in this directory lazily as needed. For more * information, see the <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths --> * <pre> * client.listPaths& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths --> * * @return A reactive response emitting the list of files/directories. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<PathItem> listPaths() { return this.listPaths(false, false, null); } /** * Returns a reactive Publisher emitting all the files/directories in this directory lazily as needed. For more * information, see the <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths * <pre> * client.listPaths& * .subscribe& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths * * @param recursive Specifies if the call should recursively include all paths. * @param userPrincipleNameReturned If "true", the user identity values returned in the x-ms-owner, x-ms-group, * and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. * If "false", the values will be returned as Azure Active Directory Object IDs. * The default value is false. Note that group and application Object IDs are not translated because they do not * have unique friendly names. * @param maxResults Specifies the maximum number of blobs to return per page, including all BlobPrefix elements. If * the request does not specify maxResults or specifies a value greater than 5,000, the server will return up to * 5,000 items per page. * @return A reactive response emitting the list of files/directories. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<PathItem> listPaths(boolean recursive, boolean userPrincipleNameReturned, Integer maxResults) { try { return listPathsWithOptionalTimeout(recursive, userPrincipleNameReturned, maxResults, null); } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } PagedFlux<PathItem> listPathsWithOptionalTimeout(boolean recursive, boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) { BiFunction<String, Integer, Mono<PagedResponse<PathItem>>> func = (marker, pageSize) -> listPathsSegment(marker, recursive, userPrincipleNameReturned, pageSize == null ? maxResults : pageSize, timeout) .map(response -> { List<PathItem> value = response.getValue() == null ? Collections.emptyList() : response.getValue().getPaths().stream() .map(Transforms::toPathItem) .collect(Collectors.toList()); return new PagedResponseBase<>( response.getRequest(), response.getStatusCode(), response.getHeaders(), value, response.getDeserializedHeaders().getXMsContinuation(), response.getDeserializedHeaders()); }); return new PagedFlux<>(pageSize -> func.apply(null, pageSize), func); } private Mono<FileSystemsListPathsResponse> listPathsSegment(String marker, boolean recursive, boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) { return StorageImplUtils.applyOptionalTimeout( this.fileSystemDataLakeStorage.getFileSystems().listPathsWithResponseAsync( recursive, null, null, marker, getDirectoryPath(), maxResults, userPrincipleNameReturned, Context.NONE), timeout); } /** * Prepares a SpecializedBlobClientBuilder with the pathname appended to the end of the current BlockBlobClient's * url * @param pathName The name of the path to append * @return {@link SpecializedBlobClientBuilder} */ SpecializedBlobClientBuilder prepareBuilderAppendPath(String pathName) { String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(getPathUrl(), "blob", "dfs"); return new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .serviceVersion(TransformUtils.toBlobServiceVersion(getServiceVersion())) .endpoint(StorageImplUtils.appendToUrlPath(blobUrl, pathName).toString()); } }
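`listPathsWithOptionalTimeout` above stitches `listPathsSegment` calls into a `PagedFlux` keyed on the `x-ms-continuation` token. A hypothetical consumption sketch, assuming an already-constructed `DataLakeDirectoryAsyncClient` is supplied by the caller:

```java
import com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient;

final class ListPathsSketch {
    // Walks the directory page by page; each page corresponds to one
    // listPathsSegment round trip driven by the continuation token.
    static void printAllPaths(DataLakeDirectoryAsyncClient client) {
        client.listPaths(true, false, 100) // recursive, Object IDs untranslated, up to 100 items per page
            .byPage()
            .subscribe(page -> page.getValue()
                .forEach(item -> System.out.println(item.getName())));
    }
}
```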
class DataLakeDirectoryAsyncClient extends DataLakePathAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(DataLakeDirectoryAsyncClient.class); /** * Package-private constructor for use by {@link DataLakePathClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param fileSystemName The file system name. * @param directoryName The directory name. * @param blockBlobAsyncClient The underlying {@link BlobContainerAsyncClient} */ DataLakeDirectoryAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName, String fileSystemName, String directoryName, BlockBlobAsyncClient blockBlobAsyncClient, AzureSasCredential sasToken) { super(pipeline, url, serviceVersion, accountName, fileSystemName, directoryName, PathResourceType.DIRECTORY, blockBlobAsyncClient, sasToken); } DataLakeDirectoryAsyncClient(DataLakePathAsyncClient dataLakePathAsyncClient) { super(dataLakePathAsyncClient.getHttpPipeline(), dataLakePathAsyncClient.getAccountUrl(), dataLakePathAsyncClient.getServiceVersion(), dataLakePathAsyncClient.getAccountName(), dataLakePathAsyncClient.getFileSystemName(), Utility.urlEncode(dataLakePathAsyncClient.pathName), PathResourceType.DIRECTORY, dataLakePathAsyncClient.getBlockBlobAsyncClient(), dataLakePathAsyncClient.getSasToken()); } /** * Gets the URL of the directory represented by this client on the Data Lake service. * * @return the URL. */ public String getDirectoryUrl() { return getPathUrl(); } /** * Gets the path of this directory, not including the name of the resource itself. * * @return The path of the directory. */ public String getDirectoryPath() { return getObjectPath(); } /** * Gets the name of this directory, not including its full path. * * @return The name of the directory. */ public String getDirectoryName() { return getObjectName(); } /** * Deletes a directory. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete --> * <pre> * client.delete& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete --> * * <p>For more information see the * <a href="https: * Docs</a></p> * * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> delete() { return deleteWithResponse(false, null).flatMap(FluxUtil::toMono); } /** * Deletes a directory. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * boolean recursive = false; & * * client.deleteWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param recursive Whether to delete all paths beneath the directory. * @param requestConditions {@link DataLakeRequestConditions} * * @return A reactive response signalling completion. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteWithResponse(boolean recursive, DataLakeRequestConditions requestConditions) { try { return withContext(context -> deleteWithResponse(recursive, requestConditions, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Deletes a directory if it exists. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteIfExists --> * <pre> * client.deleteIfExists& * if & * System.out.println& * & * System.out.println& * & * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteIfExists --> * * <p>For more information see the * <a href="https: * Docs</a></p> * * @return a reactive response signaling completion. {@code true} indicates that the directory was successfully * deleted, {@code false} indicates that the directory did not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> deleteIfExists() { return deleteIfExistsWithResponse(new DataLakePathDeleteOptions()) .map(response -> response.getStatusCode() != 404); } /** * Deletes a directory if it exists. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteIfExistsWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * boolean recursive = false; & * DataLakePathDeleteOptions options = new DataLakePathDeleteOptions& * .setRequestConditions& * * client.deleteIfExistsWithResponse& * if & * System.out.println& * & * System.out.println& * & * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteIfExistsWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param options {@link DataLakePathDeleteOptions} * * @return A reactive response signaling completion. If {@link Response}'s status code is 200, the directory was * successfully deleted. If status code is 404, the directory does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteIfExistsWithResponse(DataLakePathDeleteOptions options) { try { options = options == null ? new DataLakePathDeleteOptions() : options; return deleteWithResponse(options.getIsRecursive(), options.getRequestConditions()).onErrorResume(t -> t instanceof DataLakeStorageException && ((DataLakeStorageException) t).getStatusCode() == 404, t -> { HttpResponse response = ((DataLakeStorageException) t).getResponse(); return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a new DataLakeFileAsyncClient object by concatenating fileName to the end of * DataLakeDirectoryAsyncClient's URL. The new DataLakeFileAsyncClient uses the same request policy pipeline as the * DataLakeDirectoryAsyncClient. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient * <pre> * DataLakeFileAsyncClient dataLakeFileClient = client.getFileAsyncClient& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient * * @param fileName A {@code String} representing the name of the file. * @return A new {@link DataLakeFileAsyncClient} object which references the file with the specified name in this * file system. 
*/ public DataLakeFileAsyncClient getFileAsyncClient(String fileName) { Objects.requireNonNull(fileName, "'fileName' can not be set to null"); BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(fileName).buildBlockBlobAsyncClient(); String pathPrefix = getObjectPath().isEmpty() ? "" : getObjectPath() + "/"; return new DataLakeFileAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getFileSystemName(), Utility.urlEncode(pathPrefix + Utility.urlDecode(fileName)), blockBlobAsyncClient, this.getSasToken()); } /** * Creates a new file within a directory. By default, this method will not overwrite an existing file. * For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * <pre> * DataLakeFileAsyncClient fileClient = client.createFile& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * * @param fileName Name of the file to create. * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeFileAsyncClient> createFile(String fileName) { return createFile(fileName, false); } /** * Creates a new file within a directory. For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * <pre> * boolean overwrite = false; & * DataLakeFileAsyncClient fClient = client.createFile& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * * @param fileName Name of the file to create. * @param overwrite Whether to overwrite, should the file exist. * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeFileAsyncClient> createFile(String fileName, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return createFileWithResponse(fileName, null, null, null, null, requestConditions).flatMap(FluxUtil::toMono); } /** * Creates a new file within a directory. If a file with the same name already exists, the file will be * overwritten. For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse * <pre> * PathHttpHeaders httpHeaders = new PathHttpHeaders& * .setContentLanguage& * .setContentType& * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * String permissions = &quot;permissions&quot;; * String umask = &quot;umask&quot;; * DataLakeFileAsyncClient newFileClient = client.createFileWithResponse& * permissions, umask, httpHeaders, Collections.singletonMap& * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse * * @param fileName Name of the file to create. * @param permissions POSIX access permissions for the file owner, the file owning group, and others. * @param umask Restricts permissions of the file to be created. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the file. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. 
* @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeFileAsyncClient} used to interact with the file created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeFileAsyncClient>> createFileWithResponse(String fileName, String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { DataLakeFileAsyncClient dataLakeFileAsyncClient; try { dataLakeFileAsyncClient = getFileAsyncClient(fileName); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } return dataLakeFileAsyncClient.createWithResponse(permissions, umask, headers, metadata, requestConditions) .map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient)); } /** * Creates a new file within a directory if it does not exist. By default this method will not overwrite an existing * file. For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileIfNotExists * <pre> * DataLakeFileAsyncClient fileClient = client.createFileIfNotExists& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileIfNotExists * * @param fileName Name of the file to create. * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeFileAsyncClient> createFileIfNotExists(String fileName) { return createFileIfNotExistsWithResponse(fileName, new DataLakePathCreateOptions()).flatMap(FluxUtil::toMono); } /** * Creates a new file within a directory if it does not exist. For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileIfNotExistsWithResponse * <pre> * PathHttpHeaders headers = new PathHttpHeaders& * .setContentLanguage& * .setContentType& * String permissions = &quot;permissions&quot;; * String umask = &quot;umask&quot;; * DataLakePathCreateOptions options = new DataLakePathCreateOptions& * .setPermissions& * * client.createFileIfNotExistsWithResponse& * if & * System.out.println& * & * System.out.println& * & * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileIfNotExistsWithResponse * * @param fileName Name of the file to create. * @param options {@link DataLakePathCreateOptions} * @return A {@link Mono} containing a {@link Response} whose {@link Response * {@link DataLakeFileAsyncClient} used to interact with the file created. If {@link Response}'s status code is 201, * a new file was successfully created. If status code is 409, a file with the same name already existed * at this location. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeFileAsyncClient>> createFileIfNotExistsWithResponse(String fileName, DataLakePathCreateOptions options) { DataLakeFileAsyncClient dataLakeFileAsyncClient = getFileAsyncClient(fileName); try { return dataLakeFileAsyncClient.createIfNotExistsWithResponse(options) .map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Deletes the specified file in the file system. 
If the file doesn't exist the operation fails. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile * <pre> * client.deleteFile& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile * * @param fileName Name of the file to delete. * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteFile(String fileName) { return deleteFileWithResponse(fileName, null).flatMap(FluxUtil::toMono); } /** * Deletes the specified file in the directory. If the file doesn't exist the operation fails. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * * client.deleteFileWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse * * @param fileName Name of the file to delete. * @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing status code and HTTP headers */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteFileWithResponse(String fileName, DataLakeRequestConditions requestConditions) { DataLakeFileAsyncClient dataLakeFileAsyncClient; try { dataLakeFileAsyncClient = getFileAsyncClient(fileName); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } return dataLakeFileAsyncClient.deleteWithResponse(requestConditions); } /** * Deletes the specified file in the file system if it exists. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileIfExists * <pre> * client.deleteFileIfExists& * if & * System.out.println& * & * System.out.println& * & * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileIfExists * * @param fileName Name of the file to delete. * @return a reactive response signaling completion. {@code true} indicates that the specified file was successfully * deleted, {@code false} indicates that the specified file did not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> deleteFileIfExists(String fileName) { return deleteFileIfExistsWithResponse(fileName, new DataLakePathDeleteOptions()) .map(response -> response.getStatusCode() != 404); } /** * Deletes the specified file in the directory if it exists. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileIfExistsWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * DataLakePathDeleteOptions options = new DataLakePathDeleteOptions& * .setRequestConditions& * * client.deleteFileIfExistsWithResponse& * if & * System.out.println& * & * System.out.println& * & * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileIfExistsWithResponse * * @param fileName Name of the file to delete. 
* @param options {@link DataLakePathDeleteOptions} * @return A reactive response signaling completion. If {@link Response}'s status code is 200, the specified file was * successfully deleted. If status code is 404, the specified file does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteFileIfExistsWithResponse(String fileName, DataLakePathDeleteOptions options) { try { return withContext(context -> this.deleteFileIfExistsWithResponse(fileName, options, context)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<Void>> deleteFileIfExistsWithResponse(String fileName, DataLakePathDeleteOptions options, Context context) { try { return getFileAsyncClient(fileName).deleteIfExistsWithResponse(options, context); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a new DataLakeDirectoryAsyncClient object by concatenating subdirectoryName to the end of * DataLakeDirectoryAsyncClient's URL. The new DataLakeDirectoryAsyncClient uses the same request policy pipeline * as the DataLakeDirectoryAsyncClient. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubdirectoryAsyncClient * <pre> * DataLakeDirectoryAsyncClient dataLakeDirectoryClient = client.getSubdirectoryAsyncClient& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubdirectoryAsyncClient * * @param subdirectoryName A {@code String} representing the name of the sub-directory. * @return A new {@link DataLakeDirectoryAsyncClient} object which references the directory with the specified name * in this file system. */ public DataLakeDirectoryAsyncClient getSubdirectoryAsyncClient(String subdirectoryName) { Objects.requireNonNull(subdirectoryName, "'subdirectoryName' can not be set to null"); BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(subdirectoryName) .buildBlockBlobAsyncClient(); String pathPrefix = getObjectPath().isEmpty() ? "" : getObjectPath() + "/"; return new DataLakeDirectoryAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getFileSystemName(), Utility.urlEncode(pathPrefix + Utility.urlDecode(subdirectoryName)), blockBlobAsyncClient, this.getSasToken()); } /** * Creates a new sub-directory within a directory. By default, this method will not overwrite an existing * sub-directory. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * <pre> * DataLakeDirectoryAsyncClient directoryClient = client.createSubdirectory& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * * @param subdirectoryName Name of the sub-directory to create. * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory * created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName) { return createSubdirectory(subdirectoryName, false); } /** * Creates a new sub-directory within a directory. 
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * <pre> * boolean overwrite = false; & * DataLakeDirectoryAsyncClient dClient = client.createSubdirectory& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * * @param subdirectoryName Name of the sub-directory to create. * @param overwrite Whether to overwrite, should the subdirectory exist. * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory * created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return createSubdirectoryWithResponse(subdirectoryName, null, null, null, null, requestConditions) .flatMap(FluxUtil::toMono); } /** * Creates a new sub-directory within a directory. If a sub-directory with the same name already exists, the * sub-directory will be overwritten. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryWithResponse * <pre> * PathHttpHeaders httpHeaders = new PathHttpHeaders& * .setContentLanguage& * .setContentType& * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * String permissions = &quot;permissions&quot;; * String umask = &quot;umask&quot;; * DataLakeDirectoryAsyncClient newDirectoryClient = client.createSubdirectoryWithResponse& * directoryName, permissions, umask, httpHeaders, Collections.singletonMap& * requestConditions * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryWithResponse * * @param subdirectoryName Name of the sub-directory to create. * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and * others. * @param umask Restricts permissions of the sub-directory to be created. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeDirectoryAsyncClient} used to interact with the sub-directory created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeDirectoryAsyncClient>> createSubdirectoryWithResponse(String subdirectoryName, String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient; try { dataLakeDirectoryAsyncClient = getSubdirectoryAsyncClient(subdirectoryName); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } return dataLakeDirectoryAsyncClient.createWithResponse(permissions, umask, headers, metadata, requestConditions) .map(response -> new SimpleResponse<>(response, dataLakeDirectoryAsyncClient)); } /** * Creates a new subdirectory within a directory if it does not exist. 
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryIfNotExists * <pre> * DataLakeDirectoryAsyncClient subdirectoryClient = client.createSubdirectoryIfNotExists& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryIfNotExists * * @param subdirectoryName Name of the sub-directory to create. * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the subdirectory * created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeDirectoryAsyncClient> createSubdirectoryIfNotExists(String subdirectoryName) { return createSubdirectoryIfNotExistsWithResponse(subdirectoryName, new DataLakePathCreateOptions()) .flatMap(FluxUtil::toMono); } /** * Creates a new sub-directory within a directory if it does not exist. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryIfNotExistsWithResponse * <pre> * PathHttpHeaders headers = new PathHttpHeaders& * .setContentLanguage& * .setContentType& * String permissions = &quot;permissions&quot;; * String umask = &quot;umask&quot;; * DataLakePathCreateOptions options = new DataLakePathCreateOptions& * .setPermissions& * * client.createSubdirectoryIfNotExistsWithResponse& * if & * System.out.println& * & * System.out.println& * & * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryIfNotExistsWithResponse * * @param subdirectoryName Name of the subdirectory to create. * @param options {@link DataLakePathCreateOptions} * @return A {@link Mono} containing a {@link Response} whose {@link Response * {@link DataLakeDirectoryAsyncClient} used to interact with the subdirectory created. If {@link Response}'s status * code is 201, a new subdirectory was successfully created. If status code is 409, a subdirectory with the same * name already existed at this location. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeDirectoryAsyncClient>> createSubdirectoryIfNotExistsWithResponse( String subdirectoryName, DataLakePathCreateOptions options) { options = options == null ? new DataLakePathCreateOptions() : options; options.setRequestConditions(new DataLakeRequestConditions() .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD)); try { return createSubdirectoryWithResponse(subdirectoryName, options.getPermissions(), options.getUmask(), options.getPathHttpHeaders(), options.getMetadata(), options.getRequestConditions()) .onErrorResume(t -> t instanceof DataLakeStorageException && ((DataLakeStorageException) t) .getStatusCode() == 409, t -> { HttpResponse response = ((DataLakeStorageException) t).getResponse(); return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), getSubdirectoryAsyncClient(subdirectoryName))); }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Deletes the specified sub-directory in the directory. If the sub-directory doesn't exist or is not empty the * operation fails. * For more information see the <a href="https: * Docs</a>. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectory * <pre> * client.deleteSubdirectory& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectory * * @param subdirectoryName Name of the sub-directory to delete. * @return A reactive response signalling completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubdirectory(String subdirectoryName) { return deleteSubdirectoryWithResponse(subdirectoryName, false, null).flatMap(FluxUtil::toMono); } /** * Deletes the specified sub-directory in the directory. If the sub-directory doesn't exist or is not empty the * operation fails. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * boolean recursive = false; & * * client.deleteSubdirectoryWithResponse& * .subscribe& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryWithResponse * * @param directoryName Name of the sub-directory to delete. * @param recursive Whether to delete all paths beneath the sub-directory. * @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing status code and HTTP headers */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubdirectoryWithResponse(String directoryName, boolean recursive, DataLakeRequestConditions requestConditions) { DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient; try { dataLakeDirectoryAsyncClient = getSubdirectoryAsyncClient(directoryName); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } return dataLakeDirectoryAsyncClient.deleteWithResponse(recursive, requestConditions); } /** * Deletes the specified subdirectory in the directory if it exists. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryIfExists * <pre> * client.deleteSubdirectoryIfExists& * if & * System.out.println& * & * System.out.println& * & * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryIfExists * * @param subdirectoryName Name of the subdirectory to delete. * @return A reactive response signaling completion. {@code true} indicates that the subdirectory was deleted. * {@code false} indicates the specified subdirectory does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> deleteSubdirectoryIfExists(String subdirectoryName) { return deleteSubdirectoryIfExistsWithResponse(subdirectoryName, new DataLakePathDeleteOptions()) .map(response -> response.getStatusCode() != 404); } /** * Deletes the specified subdirectory in the directory if it exists. * For more information see the <a href="https: * Docs</a>. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryIfExistsWithResponse * <pre> * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions& * .setLeaseId& * boolean recursive = false; & * DataLakePathDeleteOptions options = new DataLakePathDeleteOptions& * .setRequestConditions& * * client.deleteSubdirectoryIfExistsWithResponse& * if & * System.out.println& * & * System.out.println& * & * & * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryIfExistsWithResponse * * @param directoryName Name of the subdirectory to delete. * @param options {@link DataLakePathDeleteOptions} * @return A reactive response signaling completion. If {@link Response}'s status code is 200, the specified subdirectory * was successfully deleted. If status code is 404, the specified subdirectory does not exist. * */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubdirectoryIfExistsWithResponse(String directoryName, DataLakePathDeleteOptions options) { try { return deleteSubdirectoryWithResponse(directoryName, options.getIsRecursive(), options.getRequestConditions()) .onErrorResume(t -> t instanceof DataLakeStorageException && ((DataLakeStorageException) t).getStatusCode() == 404, t -> { HttpResponse response = ((DataLakeStorageException) t).getResponse(); return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Moves the directory to another location within the file system. * For more information see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename * <pre> * DataLakeDirectoryAsyncClient renamedClient = client.rename& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system * name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir" * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the new directory * created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DataLakeDirectoryAsyncClient> rename(String destinationFileSystem, String destinationPath) { return renameWithResponse(destinationFileSystem, destinationPath, null, null).flatMap(FluxUtil::toMono); } /** * Moves the directory to another location within the file system. 
* For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.renameWithResponse * <pre> * DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions& * .setLeaseId& * DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions& * * DataLakeDirectoryAsyncClient newRenamedClient = client.renameWithResponse& * sourceRequestConditions, destinationRequestConditions& * System.out.println& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.renameWithResponse * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system * name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir" * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source. * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination. * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeDirectoryAsyncClient} used to interact with the directory created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DataLakeDirectoryAsyncClient>> renameWithResponse(String destinationFileSystem, String destinationPath, DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions) { try { return withContext(context -> renameWithResponse(destinationFileSystem, destinationPath, sourceRequestConditions, destinationRequestConditions, context)).map( response -> new SimpleResponse<>(response, new DataLakeDirectoryAsyncClient(response.getValue()))); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Returns a reactive Publisher emitting all the files/directories in this directory lazily as needed. For more * information, see the <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths --> * <pre> * client.listPaths& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths --> * * @return A reactive response emitting the list of files/directories. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<PathItem> listPaths() { return this.listPaths(false, false, null); } /** * Returns a reactive Publisher emitting all the files/directories in this directory lazily as needed. For more * information, see the <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths * <pre> * client.listPaths& * .subscribe& * </pre> * <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths * * @param recursive Specifies if the call should recursively include all paths. * @param userPrincipleNameReturned If "true", the user identity values returned by the x-ms-owner, x-ms-group, * and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. * If "false", the values will be returned as Azure Active Directory Object IDs. * The default value is false. 
Note that group and application Object IDs are not translated because they do not * have unique friendly names. * @param maxResults Specifies the maximum number of blobs to return per page, including all BlobPrefix elements. If * the request does not specify maxResults or specifies a value greater than 5,000, the server will return up to * 5,000 items per page. * @return A reactive response emitting the list of files/directories. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<PathItem> listPaths(boolean recursive, boolean userPrincipleNameReturned, Integer maxResults) { try { return listPathsWithOptionalTimeout(recursive, userPrincipleNameReturned, maxResults, null); } catch (RuntimeException ex) { return pagedFluxError(LOGGER, ex); } } PagedFlux<PathItem> listPathsWithOptionalTimeout(boolean recursive, boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) { BiFunction<String, Integer, Mono<PagedResponse<PathItem>>> func = (marker, pageSize) -> listPathsSegment(marker, recursive, userPrincipleNameReturned, pageSize == null ? maxResults : pageSize, timeout) .map(response -> { List<PathItem> value = response.getValue() == null ? Collections.emptyList() : response.getValue().getPaths().stream() .map(Transforms::toPathItem) .collect(Collectors.toList()); return new PagedResponseBase<>( response.getRequest(), response.getStatusCode(), response.getHeaders(), value, response.getDeserializedHeaders().getXMsContinuation(), response.getDeserializedHeaders()); }); return new PagedFlux<>(pageSize -> func.apply(null, pageSize), func); } private Mono<FileSystemsListPathsResponse> listPathsSegment(String marker, boolean recursive, boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) { return StorageImplUtils.applyOptionalTimeout( this.fileSystemDataLakeStorage.getFileSystems().listPathsWithResponseAsync( recursive, null, null, marker, getDirectoryPath(), maxResults, userPrincipleNameReturned, Context.NONE), timeout); } /** * Prepares a SpecializedBlobClientBuilder with the pathname appended to the end of the current BlockBlobClient's * url * @param pathName The name of the path to append * @return {@link SpecializedBlobClientBuilder} */ SpecializedBlobClientBuilder prepareBuilderAppendPath(String pathName) { String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(getPathUrl(), "blob", "dfs"); return new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .serviceVersion(TransformUtils.toBlobServiceVersion(getServiceVersion())) .endpoint(StorageImplUtils.appendToUrlPath(blobUrl, pathName).toString()); } }
- We should rename `t` to `e`. - Another improvement: if `t` is already a `RuntimeException`, we don't have to wrap it. Perhaps we need another method in the logger that encapsulates this check.
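As a hedged sketch of that suggestion (a hypothetical helper, not an existing `ClientLogger` API; the name `logAndWrapAsError` is made up for illustration):

```java
// Hypothetical convenience method; ClientLogger does not expose this today.
// It renames the parameter to 'e' and avoids double-wrapping when the
// exception is already a RuntimeException.
private RuntimeException logAndWrapAsError(String message, Exception e) {
    RuntimeException toLog = (e instanceof RuntimeException)
        ? (RuntimeException) e
        : new RuntimeException(message, e);
    return LOGGER.logExceptionAsError(toLog);
}
```

The call sites below would then shrink to `throw logAndWrapAsError("Can't parse 'java.version':" + version, e);`.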
private int getJavaVersion() { String version = System.getProperty("java.version"); if (CoreUtils.isNullOrEmpty(version)) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't find 'java.version' system property.")); } if (version.startsWith("1.")) { if (version.length() < 3) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version)); } try { return Integer.parseInt(version.substring(2, 3)); } catch (Exception t) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t)); } } else { int idx = version.indexOf("."); if (idx == -1) { return Integer.parseInt(version); } try { return Integer.parseInt(version.substring(0, idx)); } catch (Exception t) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t)); } } }
throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t));
private int getJavaVersion() { String version = System.getProperty("java.version"); if (CoreUtils.isNullOrEmpty(version)) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't find 'java.version' system property.")); } if (version.startsWith("1.")) { if (version.length() < 3) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version)); } try { return Integer.parseInt(version.substring(2, 3)); } catch (Exception t) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t)); } } else { int idx = version.indexOf("."); if (idx == -1) { return Integer.parseInt(version); } try { return Integer.parseInt(version.substring(0, idx)); } catch (Exception t) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t)); } } }
class JdkAsyncHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(JdkAsyncHttpClient.class); private final java.net.http.HttpClient jdkHttpClient; private final Set<String> restrictedHeaders; JdkAsyncHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders) { this.jdkHttpClient = httpClient; int javaVersion = getJavaVersion(); if (javaVersion <= 11) { throw LOGGER.logExceptionAsError( new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below.")); } this.restrictedHeaders = restrictedHeaders; LOGGER.verbose("Effective restricted headers: {}", restrictedHeaders); } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return toJdkHttpRequest(request) .flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, ofPublisher())) .flatMap(innerResponse -> { if (eagerlyReadResponse) { int statusCode = innerResponse.statusCode(); HttpHeaders headers = fromJdkHttpHeaders(innerResponse.headers()); return FluxUtil.collectBytesFromNetworkResponse(JdkFlowAdapter .flowPublisherToFlux(innerResponse.body()) .flatMapSequential(Flux::fromIterable), headers) .map(bytes -> new BufferedJdkHttpResponse(request, statusCode, headers, bytes)); } else { return Mono.just(new JdkHttpResponse(request, innerResponse)); } })); } /** * Converts the given azure-core request to the JDK HttpRequest type. * * @param request the azure-core request * @return the Mono emitting HttpRequest */ private Mono<java.net.http.HttpRequest> toJdkHttpRequest(HttpRequest request) { return Mono.fromCallable(() -> { final java.net.http.HttpRequest.Builder builder = java.net.http.HttpRequest.newBuilder(); try { builder.uri(request.getUrl().toURI()); } catch (URISyntaxException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } final HttpHeaders headers = request.getHeaders(); if (headers != null) { for (HttpHeader header : headers) { final String headerName = header.getName(); if (!restrictedHeaders.contains(headerName)) { header.getValuesList().forEach(headerValue -> builder.header(headerName, headerValue)); } else { LOGGER.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 " + "and above. This header can be added to allow list in JAVA_HOME/conf/net.properties " + "or in System.setProperty() or in Configuration. Use the key 'jdk.httpclient" + ".allowRestrictedHeaders' and a comma separated list of header names."); } } } switch (request.getHttpMethod()) { case GET: return builder.GET().build(); case HEAD: return builder.method("HEAD", noBody()).build(); default: final String contentLength = request.getHeaders().getValue("content-length"); final BodyPublisher bodyPublisher = toBodyPublisher(request.getBody(), contentLength); return builder.method(request.getHttpMethod().toString(), bodyPublisher).build(); } }); } /** * Create BodyPublisher from the given java.nio.ByteBuffer publisher. 
* * @param bbPublisher stream of java.nio.ByteBuffer representing request content * @return the request BodyPublisher */ private static BodyPublisher toBodyPublisher(Flux<ByteBuffer> bbPublisher, String contentLength) { if (bbPublisher == null) { return noBody(); } final Flow.Publisher<ByteBuffer> bbFlowPublisher = JdkFlowAdapter.publisherToFlowPublisher(bbPublisher); if (CoreUtils.isNullOrEmpty(contentLength)) { return fromPublisher(bbFlowPublisher); } else { long contentLengthLong = Long.parseLong(contentLength); if (contentLengthLong < 1) { return noBody(); } else { return fromPublisher(bbFlowPublisher, contentLengthLong); } } } /** * Get the java runtime major version. * * @return the java major version */ /** * Converts the given JDK Http headers to azure-core Http header. * * @param headers the JDK Http headers * @return the azure-core Http headers */ static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) { final HttpHeaders httpHeaders = new HttpHeaders(); for (Map.Entry<String, List<String>> kvp : headers.map().entrySet()) { if (CoreUtils.isNullOrEmpty(kvp.getValue())) { continue; } httpHeaders.set(kvp.getKey(), kvp.getValue()); } return httpHeaders; } }
class JdkAsyncHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(JdkAsyncHttpClient.class); private final java.net.http.HttpClient jdkHttpClient; private final Set<String> restrictedHeaders; JdkAsyncHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders) { this.jdkHttpClient = httpClient; int javaVersion = getJavaVersion(); if (javaVersion <= 11) { throw LOGGER.logExceptionAsError( new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below.")); } this.restrictedHeaders = restrictedHeaders; LOGGER.verbose("Effective restricted headers: {}", restrictedHeaders); } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return toJdkHttpRequest(request) .flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, ofPublisher())) .flatMap(innerResponse -> { if (eagerlyReadResponse) { int statusCode = innerResponse.statusCode(); HttpHeaders headers = fromJdkHttpHeaders(innerResponse.headers()); return FluxUtil.collectBytesFromNetworkResponse(JdkFlowAdapter .flowPublisherToFlux(innerResponse.body()) .flatMapSequential(Flux::fromIterable), headers) .map(bytes -> new BufferedJdkHttpResponse(request, statusCode, headers, bytes)); } else { return Mono.just(new JdkHttpResponse(request, innerResponse)); } })); } /** * Converts the given azure-core request to the JDK HttpRequest type. * * @param request the azure-core request * @return the Mono emitting HttpRequest */ private Mono<java.net.http.HttpRequest> toJdkHttpRequest(HttpRequest request) { return Mono.fromCallable(() -> { final java.net.http.HttpRequest.Builder builder = java.net.http.HttpRequest.newBuilder(); try { builder.uri(request.getUrl().toURI()); } catch (URISyntaxException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } final HttpHeaders headers = request.getHeaders(); if (headers != null) { for (HttpHeader header : headers) { final String headerName = header.getName(); if (!restrictedHeaders.contains(headerName)) { header.getValuesList().forEach(headerValue -> builder.header(headerName, headerValue)); } else { LOGGER.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 " + "and above. This header can be added to allow list in JAVA_HOME/conf/net.properties " + "or in System.setProperty() or in Configuration. Use the key 'jdk.httpclient" + ".allowRestrictedHeaders' and a comma separated list of header names."); } } } switch (request.getHttpMethod()) { case GET: return builder.GET().build(); case HEAD: return builder.method("HEAD", noBody()).build(); default: final String contentLength = request.getHeaders().getValue("content-length"); final BodyPublisher bodyPublisher = toBodyPublisher(request.getBody(), contentLength); return builder.method(request.getHttpMethod().toString(), bodyPublisher).build(); } }); } /** * Create BodyPublisher from the given java.nio.ByteBuffer publisher. 
* * @param bbPublisher stream of java.nio.ByteBuffer representing request content * @return the request BodyPublisher */ private static BodyPublisher toBodyPublisher(Flux<ByteBuffer> bbPublisher, String contentLength) { if (bbPublisher == null) { return noBody(); } final Flow.Publisher<ByteBuffer> bbFlowPublisher = JdkFlowAdapter.publisherToFlowPublisher(bbPublisher); if (CoreUtils.isNullOrEmpty(contentLength)) { return fromPublisher(bbFlowPublisher); } else { long contentLengthLong = Long.parseLong(contentLength); if (contentLengthLong < 1) { return noBody(); } else { return fromPublisher(bbFlowPublisher, contentLengthLong); } } } /** * Get the java runtime major version. * * @return the java major version */ /** * Converts the given JDK Http headers to azure-core Http header. * * @param headers the JDK Http headers * @return the azure-core Http headers */ static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) { final HttpHeaders httpHeaders = new HttpHeaders(); for (Map.Entry<String, List<String>> kvp : headers.map().entrySet()) { if (CoreUtils.isNullOrEmpty(kvp.getValue())) { continue; } httpHeaders.set(kvp.getKey(), kvp.getValue()); } return httpHeaders; } }
Shouldn't this be `monoError` instead, so we also log the exception?
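A minimal sketch of the suggested change, assuming azure-core's `FluxUtil.monoError(ClientLogger, RuntimeException)` helper, which logs the exception as an error before returning a failed `Mono`:

```java
try {
    context.getHttpRequest().setUrl(urlBuilder.setPort(port).toUrl());
} catch (MalformedURLException e) {
    // monoError logs through the ClientLogger and then returns Mono.error(...),
    // so the failure is recorded instead of being propagated silently.
    return FluxUtil.monoError(LOGGER,
        new RuntimeException("Failed to set the HTTP request port to " + port + ".", e));
}
```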
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl()); if (overwrite || urlBuilder.getPort() == null) { LOGGER.log(LogLevel.VERBOSE, () -> "Changing port to " + port); try { context.getHttpRequest().setUrl(urlBuilder.setPort(port).toUrl()); } catch (MalformedURLException e) { return Mono.error(new RuntimeException("Failed to set the HTTP request port to " + port + ".", e)); } } return next.process(); }
return Mono.error(new RuntimeException("Failed to set the HTTP request port to " + port + ".", e));
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl()); if (overwrite || urlBuilder.getPort() == null) { LOGGER.log(LogLevel.VERBOSE, () -> "Changing port to " + port); try { context.getHttpRequest().setUrl(urlBuilder.setPort(port).toUrl()); } catch (MalformedURLException e) { return Mono.error(new RuntimeException("Failed to set the HTTP request port to " + port + ".", e)); } } return next.process(); }
class PortPolicy implements HttpPipelinePolicy { private static final ClientLogger LOGGER = new ClientLogger(PortPolicy.class); private final int port; private final boolean overwrite; /** * Creates a new PortPolicy object. * * @param port The port to set. * @param overwrite Whether to overwrite a {@link HttpRequest HttpRequest's} port if it already has one. */ public PortPolicy(int port, boolean overwrite) { this.port = port; this.overwrite = overwrite; } @Override }
class PortPolicy implements HttpPipelinePolicy { private static final ClientLogger LOGGER = new ClientLogger(PortPolicy.class); private final int port; private final boolean overwrite; /** * Creates a new PortPolicy object. * * @param port The port to set. * @param overwrite Whether to overwrite a {@link HttpRequest HttpRequest's} port if it already has one. */ public PortPolicy(int port, boolean overwrite) { this.port = port; this.overwrite = overwrite; } @Override }
Is `completeEmitter1` `null`? How does `getCompleteEmitter() == null` work in `SynchronousMessageSubscriber`? What is the purpose of these mocks?
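For what it's worth, a minimal reading of the setup below (a sketch based only on the code shown here): the stub returns a real `Sinks.Empty`, so `getCompleteEmitter()` never returns `null`, and the tests later drive work completion through it:

```java
// The stub hands back a real (non-null) empty sink.
Sinks.Empty<Void> completeEmitter1 = Sinks.empty();
when(work1.getCompleteEmitter()).thenReturn(completeEmitter1);

// Later, a test simulates work1 completing by emitting the terminal signal.
completeEmitter1.emitEmpty(Sinks.EmitFailureHandler.FAIL_FAST);
```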
public void setup() { mocksCloseable = MockitoAnnotations.openMocks(this); when(work1.getId()).thenReturn(WORK_ID); when(work1.getNumberOfEvents()).thenReturn(NUMBER_OF_WORK_ITEMS); completeEmitter1 = Sinks.empty(); when(work1.getCompleteEmitter()).thenReturn(completeEmitter1); when(work2.getId()).thenReturn(WORK_ID_2); when(work2.getNumberOfEvents()).thenReturn(NUMBER_OF_WORK_ITEMS_2); completeEmitter2 = Sinks.empty(); when(work2.getCompleteEmitter()).thenReturn(completeEmitter2); syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, false, operationTimeout); }
when(work1.getCompleteEmitter()).thenReturn(completeEmitter1);
public void setup() { mocksCloseable = MockitoAnnotations.openMocks(this); when(work1.getId()).thenReturn(WORK_ID); when(work1.getNumberOfEvents()).thenReturn(NUMBER_OF_WORK_ITEMS); when(work2.getId()).thenReturn(WORK_ID_2); when(work2.getNumberOfEvents()).thenReturn(NUMBER_OF_WORK_ITEMS_2); syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, false, operationTimeout); }
class SynchronousMessageSubscriberTest { private static final long WORK_ID = 10L; private static final long WORK_ID_2 = 10L; private static final int NUMBER_OF_WORK_ITEMS = 4; private static final int NUMBER_OF_WORK_ITEMS_2 = 3; private final Duration operationTimeout = Duration.ofSeconds(10); @Mock private ServiceBusReceiverAsyncClient asyncClient; @Mock private SynchronousReceiveWork work1; @Mock private SynchronousReceiveWork work2; @Mock private Subscription subscription; @Captor private ArgumentCaptor<Long> subscriptionArgumentCaptor; private SynchronousMessageSubscriber syncSubscriber; private AutoCloseable mocksCloseable; private Sinks.Empty<Void> completeEmitter1; private Sinks.Empty<Void> completeEmitter2; @BeforeEach @AfterEach public void teardown() throws Exception { if (mocksCloseable != null) { mocksCloseable.close(); } syncSubscriber.dispose(); Mockito.framework().clearInlineMock(this); } /** * Verify that the initial subscription requests work1's number of work items. */ @Test public void workAddedAndRequestedUpstream() { when(work1.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS); syncSubscriber.hookOnSubscribe(subscription); verify(subscription).request(work1.getNumberOfEvents()); verify(work1).start(); assertEquals(0, syncSubscriber.getWorkQueueSize()); } /** * A work gets queued in work queue. */ @Test public void queueWorkTest() { syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, false, operationTimeout); syncSubscriber.queueWork(work2); assertEquals(2, syncSubscriber.getWorkQueueSize()); } /** * Verifies that this processes multiple work items. */ @Test public void processesMultipleWorkItems() { final SynchronousReceiveWork work3 = mock(SynchronousReceiveWork.class); when(work3.getId()).thenReturn(3L); when(work3.getNumberOfEvents()).thenReturn(1); when(work3.getCompleteEmitter()).thenReturn(Sinks.empty()); final ServiceBusReceivedMessage message1 = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2 = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message3 = mock(ServiceBusReceivedMessage.class); final AtomicBoolean isTerminal = new AtomicBoolean(false); final AtomicInteger remaining = new AtomicInteger(NUMBER_OF_WORK_ITEMS); doAnswer(invocation -> { ServiceBusReceivedMessage arg = invocation.getArgument(0); if (arg == message1 || arg == message2) { remaining.decrementAndGet(); return true; } else { return false; } }).when(work1).emitNext(any(ServiceBusReceivedMessage.class)); doAnswer(invocation -> isTerminal.get()).when(work1).isTerminal(); doAnswer(invocation -> remaining.get()).when(work1).getRemainingEvents(); when(work2.emitNext(message3)).thenReturn(true); when(work2.isTerminal()).thenReturn(false); syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, false, operationTimeout); syncSubscriber.queueWork(work2); syncSubscriber.queueWork(work3); syncSubscriber.hookOnSubscribe(subscription); assertEquals(2, syncSubscriber.getWorkQueueSize()); syncSubscriber.hookOnNext(message1); syncSubscriber.hookOnNext(message2); isTerminal.set(true); syncSubscriber.hookOnNext(message3); verify(work2).start(); verify(work2).emitNext(message3); assertEquals(1, syncSubscriber.getWorkQueueSize()); verify(subscription, times(2)).request(subscriptionArgumentCaptor.capture()); final List<Long> allRequests = subscriptionArgumentCaptor.getAllValues(); final Set<Long> expected = new HashSet<>(); expected.add((long) work1.getNumberOfEvents()); final long requestedAfterWork1 = NUMBER_OF_WORK_ITEMS 
- remaining.get(); final long expectedDifference = work2.getNumberOfEvents() - requestedAfterWork1; expected.add(expectedDifference); assertEquals(expected.size(), allRequests.size()); allRequests.forEach(r -> assertTrue(expected.contains(r))); } /** * Verifies that when previous work have completed, update current work */ @Test public void updateCurrentWorkWhenQueueIsNotEmpty() { final SynchronousReceiveWork work3 = mock(SynchronousReceiveWork.class); when(work3.getId()).thenReturn(3L); when(work3.getNumberOfEvents()).thenReturn(1); when(work3.getCompleteEmitter()).thenReturn(Sinks.empty()); syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, false, operationTimeout); syncSubscriber.queueWork(work2); syncSubscriber.queueWork(work3); assertEquals(3, syncSubscriber.getWorkQueueSize()); syncSubscriber.hookOnSubscribe(subscription); assertEquals(2, syncSubscriber.getWorkQueueSize()); when(work1.isTerminal()).thenReturn(true); completeEmitter1.emitEmpty(Sinks.EmitFailureHandler.FAIL_FAST); verify(work2).start(); assertEquals(1, syncSubscriber.getWorkQueueSize()); when(work2.isTerminal()).thenReturn(true); completeEmitter2.emitEmpty(Sinks.EmitFailureHandler.FAIL_FAST); verify(work3).start(); assertEquals(0, syncSubscriber.getWorkQueueSize()); } /** * Verifies that all work items are completed if the subscriber is disposed. */ @Test public void completesWorkOnCancel() { when(work1.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS); when(work2.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS_2); syncSubscriber.queueWork(work2); syncSubscriber.hookOnSubscribe(subscription); syncSubscriber.hookOnCancel(); verify(work1).complete(any(String.class), isNull()); verify(work2).complete(any(String.class), isNull()); assertEquals(0, syncSubscriber.getWorkQueueSize()); } /** * Verifies that all work items are completed if the subscriber encounters an error. 
*/ @Test public void completesWorkOnError() { final Throwable error = new AmqpException(false, "Test-error", new AmqpErrorContext("foo.com")); when(work1.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS); when(work2.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS_2); syncSubscriber.queueWork(work2); syncSubscriber.hookOnSubscribe(subscription); syncSubscriber.hookOnError(error); verify(work1).complete(any(String.class), eq(error)); verify(work2).complete(any(String.class), eq(error)); assertEquals(0, syncSubscriber.getWorkQueueSize()); } @Test public void releaseIfNoActiveReceive() { when(work1.emitNext(any(ServiceBusReceivedMessage.class))).thenReturn(true); final ServiceBusReceivedMessage message1beforeTimeout = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2beforeTimeout = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message1afterTimeout = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2afterTimeout = mock(ServiceBusReceivedMessage.class); final AtomicBoolean isTerminal = new AtomicBoolean(false); doAnswer(invocation -> isTerminal.get()).when(work1).isTerminal(); final AtomicInteger expectedReleaseCalls = new AtomicInteger(0); final AtomicBoolean hadUnexpectedReleaseCall = new AtomicBoolean(false); doAnswer(invocation -> { ServiceBusReceivedMessage arg = invocation.getArgument(0); if (arg == message1afterTimeout || arg == message2afterTimeout) { expectedReleaseCalls.incrementAndGet(); } else { hadUnexpectedReleaseCall.set(true); } return Mono.empty(); }).when(asyncClient).release(any(ServiceBusReceivedMessage.class)); syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, true, operationTimeout); syncSubscriber.hookOnSubscribe(subscription); syncSubscriber.hookOnNext(message1beforeTimeout); syncSubscriber.hookOnNext(message2beforeTimeout); isTerminal.set(true); syncSubscriber.hookOnNext(message1afterTimeout); syncSubscriber.hookOnNext(message2afterTimeout); verify(work1).emitNext(message1beforeTimeout); verify(work1).emitNext(message2beforeTimeout); verify(work1, never()).emitNext(message1afterTimeout); verify(work1, never()).emitNext(message2afterTimeout); assertEquals(2, expectedReleaseCalls.get()); assertFalse(hadUnexpectedReleaseCall.get()); } }
class SynchronousMessageSubscriberTest { private static final long WORK_ID = 10L; private static final long WORK_ID_2 = 10L; private static final int NUMBER_OF_WORK_ITEMS = 4; private static final int NUMBER_OF_WORK_ITEMS_2 = 3; private final Duration operationTimeout = Duration.ofSeconds(10); @Mock private ServiceBusReceiverAsyncClient asyncClient; @Mock private SynchronousReceiveWork work1; @Mock private SynchronousReceiveWork work2; @Mock private Subscription subscription; @Captor private ArgumentCaptor<Long> subscriptionArgumentCaptor; private SynchronousMessageSubscriber syncSubscriber; private AutoCloseable mocksCloseable; @BeforeEach @AfterEach public void teardown() throws Exception { if (mocksCloseable != null) { mocksCloseable.close(); } syncSubscriber.dispose(); Mockito.framework().clearInlineMock(this); } /** * Verify that the initial subscription requests work1's number of work items. */ @Test public void workAddedAndRequestedUpstream() { when(work1.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS); syncSubscriber.hookOnSubscribe(subscription); verify(subscription).request(work1.getNumberOfEvents()); verify(work1).start(); assertEquals(0, syncSubscriber.getWorkQueueSize()); } /** * A work gets queued in work queue. */ @Test public void queueWorkTest() { syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, false, operationTimeout); syncSubscriber.queueWork(work2); assertEquals(2, syncSubscriber.getWorkQueueSize()); } /** * Verifies that this processes multiple work items and current work encounter timeout */ @Test public void processesMultipleWorkItemsAndCurrentWorkTimeout() { final ServiceBusReceivedMessage message1 = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2 = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message3 = mock(ServiceBusReceivedMessage.class); final AtomicBoolean isTerminal = new AtomicBoolean(false); final AtomicInteger remaining = new AtomicInteger(NUMBER_OF_WORK_ITEMS); doAnswer(invocation -> { ServiceBusReceivedMessage arg = invocation.getArgument(0); if (arg == message1 || arg == message2) { remaining.decrementAndGet(); return true; } else { return false; } }).when(work1).emitNext(any(ServiceBusReceivedMessage.class)); doAnswer(invocation -> isTerminal.get()).when(work1).isTerminal(); doAnswer(invocation -> remaining.get()).when(work1).getRemainingEvents(); when(work2.emitNext(message3)).thenReturn(true); when(work2.isTerminal()).thenReturn(false); final SynchronousReceiveWork work3 = mock(SynchronousReceiveWork.class); when(work3.getId()).thenReturn(3L); when(work3.getNumberOfEvents()).thenReturn(1); syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, false, operationTimeout); syncSubscriber.queueWork(work2); syncSubscriber.queueWork(work3); syncSubscriber.hookOnSubscribe(subscription); assertEquals(2, syncSubscriber.getWorkQueueSize()); syncSubscriber.hookOnNext(message1); syncSubscriber.hookOnNext(message2); isTerminal.set(true); syncSubscriber.hookOnNext(message3); verify(work2).start(); verify(work2).emitNext(message3); assertEquals(1, syncSubscriber.getWorkQueueSize()); verify(subscription, times(2)).request(subscriptionArgumentCaptor.capture()); final List<Long> allRequests = subscriptionArgumentCaptor.getAllValues(); assertEquals(NUMBER_OF_WORK_ITEMS, allRequests.get(0)); final long requestedAfterWork1 = NUMBER_OF_WORK_ITEMS - remaining.get(); final long expectedDifference = work2.getNumberOfEvents() - requestedAfterWork1; 
assertEquals(expectedDifference, allRequests.get(1)); } /** * Verifies that this processes multiple work items and current work can emit all messages successfully */ @Test public void processesMultipleWorkItemsAndCurrentWorkEmitAllMessages() { final ServiceBusReceivedMessage message1 = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2 = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message3 = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message4 = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message5 = mock(ServiceBusReceivedMessage.class); final AtomicBoolean isTerminal = new AtomicBoolean(false); final AtomicInteger remaining = new AtomicInteger(NUMBER_OF_WORK_ITEMS); doAnswer(invocation -> { ServiceBusReceivedMessage arg = invocation.getArgument(0); remaining.decrementAndGet(); if (arg == message4) { isTerminal.set(true); } return true; }).when(work1).emitNext(any(ServiceBusReceivedMessage.class)); doAnswer(invocation -> isTerminal.get()).when(work1).isTerminal(); doAnswer(invocation -> remaining.get()).when(work1).getRemainingEvents(); when(work2.isTerminal()).thenReturn(false); when(work2.emitNext(message5)).thenReturn(true); final SynchronousReceiveWork work3 = mock(SynchronousReceiveWork.class); when(work3.getId()).thenReturn(3L); when(work3.getNumberOfEvents()).thenReturn(1); syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, false, operationTimeout); syncSubscriber.queueWork(work2); syncSubscriber.queueWork(work3); syncSubscriber.hookOnSubscribe(subscription); assertEquals(2, syncSubscriber.getWorkQueueSize()); syncSubscriber.hookOnNext(message1); syncSubscriber.hookOnNext(message2); syncSubscriber.hookOnNext(message3); syncSubscriber.hookOnNext(message4); verify(work2).start(); syncSubscriber.hookOnNext(message5); verify(work2).emitNext(message5); assertEquals(1, syncSubscriber.getWorkQueueSize()); verify(subscription, times(2)).request(subscriptionArgumentCaptor.capture()); final List<Long> allRequests = subscriptionArgumentCaptor.getAllValues(); assertEquals(NUMBER_OF_WORK_ITEMS, allRequests.get(0)); assertEquals(NUMBER_OF_WORK_ITEMS_2, allRequests.get(1)); } /** * Verifies that all work items are completed if the subscriber is disposed. */ @Test public void completesWorkOnCancel() { when(work1.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS); when(work2.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS_2); syncSubscriber.queueWork(work2); syncSubscriber.hookOnSubscribe(subscription); syncSubscriber.hookOnCancel(); verify(work1).complete(any(String.class), isNull()); verify(work2).complete(any(String.class), isNull()); assertEquals(0, syncSubscriber.getWorkQueueSize()); } /** * Verifies that all work items are completed if the subscriber encounters an error. 
*/ @Test public void completesWorkOnError() { final Throwable error = new AmqpException(false, "Test-error", new AmqpErrorContext("foo.com")); when(work1.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS); when(work2.getRemainingEvents()).thenReturn(NUMBER_OF_WORK_ITEMS_2); syncSubscriber.queueWork(work2); syncSubscriber.hookOnSubscribe(subscription); syncSubscriber.hookOnError(error); verify(work1).complete(any(String.class), eq(error)); verify(work2).complete(any(String.class), eq(error)); assertEquals(0, syncSubscriber.getWorkQueueSize()); } @Test public void releaseIfNoActiveReceive() { when(work1.emitNext(any(ServiceBusReceivedMessage.class))).thenReturn(true); final ServiceBusReceivedMessage message1beforeTimeout = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2beforeTimeout = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message1afterTimeout = mock(ServiceBusReceivedMessage.class); final ServiceBusReceivedMessage message2afterTimeout = mock(ServiceBusReceivedMessage.class); final AtomicBoolean isTerminal = new AtomicBoolean(false); doAnswer(invocation -> isTerminal.get()).when(work1).isTerminal(); final AtomicInteger expectedReleaseCalls = new AtomicInteger(0); final AtomicBoolean hadUnexpectedReleaseCall = new AtomicBoolean(false); doAnswer(invocation -> { ServiceBusReceivedMessage arg = invocation.getArgument(0); if (arg == message1afterTimeout || arg == message2afterTimeout) { expectedReleaseCalls.incrementAndGet(); } else { hadUnexpectedReleaseCall.set(true); } return Mono.empty(); }).when(asyncClient).release(any(ServiceBusReceivedMessage.class)); syncSubscriber = new SynchronousMessageSubscriber(asyncClient, work1, true, operationTimeout); syncSubscriber.hookOnSubscribe(subscription); syncSubscriber.hookOnNext(message1beforeTimeout); syncSubscriber.hookOnNext(message2beforeTimeout); isTerminal.set(true); syncSubscriber.hookOnNext(message1afterTimeout); syncSubscriber.hookOnNext(message2afterTimeout); verify(work1).emitNext(message1beforeTimeout); verify(work1).emitNext(message2beforeTimeout); verify(work1, never()).emitNext(message1afterTimeout); verify(work1, never()).emitNext(message2afterTimeout); assertEquals(2, expectedReleaseCalls.get()); assertFalse(hadUnexpectedReleaseCall.get()); } }
Indentation looks off here.
protected void doHealthCheck(Health.Builder builder) { if (this.producerAsyncClient == null && this.consumerAsyncClient == null) { builder.withDetail("No client configured", "No Event Hub producer or consumer clients found."); return; } if (this.producerAsyncClient != null) { producerAsyncClient.getEventHubProperties() .map(p -> builder.up()) .block(timeout); } else { consumerAsyncClient.getEventHubProperties() .map(p -> builder.up()) .block(timeout); } }
.block(timeout);
protected void doHealthCheck(Health.Builder builder) { if (this.producerAsyncClient == null && this.consumerAsyncClient == null) { builder.withDetail("No client configured", "No Event Hub producer or consumer clients found."); return; } if (this.producerAsyncClient != null) { producerAsyncClient.getEventHubProperties() .map(p -> builder.up()) .block(timeout); } else { consumerAsyncClient.getEventHubProperties() .map(p -> builder.up()) .block(timeout); } }
class EventHubsHealthIndicator extends AbstractHealthIndicator { private final EventHubProducerAsyncClient producerAsyncClient; private final EventHubConsumerAsyncClient consumerAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link EventHubsHealthIndicator}. * @param producerAsyncClient the producer client * @param consumerAsyncClient the consumer client */ public EventHubsHealthIndicator(EventHubProducerAsyncClient producerAsyncClient, EventHubConsumerAsyncClient consumerAsyncClient) { this.producerAsyncClient = producerAsyncClient; this.consumerAsyncClient = consumerAsyncClient; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class EventHubsHealthIndicator extends AbstractHealthIndicator { private final EventHubProducerAsyncClient producerAsyncClient; private final EventHubConsumerAsyncClient consumerAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link EventHubsHealthIndicator}. * @param producerAsyncClient the producer client * @param consumerAsyncClient the consumer client */ public EventHubsHealthIndicator(EventHubProducerAsyncClient producerAsyncClient, EventHubConsumerAsyncClient consumerAsyncClient) { this.producerAsyncClient = producerAsyncClient; this.consumerAsyncClient = consumerAsyncClient; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
I removed this code here for two reasons, based on my understanding: 1. A work item in the queue is never started, so it won't be terminated unless the subscriber is disposed. 2. A work item in the queue won't send a request upstream, so if we add its remaining count to the current REQUESTED, the value may be wrong. The difference is already calculated when we try to `requestUpstream()`.
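To make the credit arithmetic concrete, a worked illustration with made-up numbers, mirroring the `requestUpstream` logic shown in the context below:

```java
// Suppose the next work item needs 5 events and 2 credits are still outstanding.
long numberOfMessages = 5;   // events the new current work needs
long currentRequested = 2;   // REQUESTED: credits already requested upstream
long difference = numberOfMessages - currentRequested; // 3

// Only the positive difference is added and requested, so a queued (never
// started) work item's remaining count is not double-counted into REQUESTED.
if (difference > 0) {
    // Operators.addCap(REQUESTED, this, difference);
    // subscription.request(difference);
}
```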
private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } }
if (currentWork != null && !currentWork.isTerminal()) {
private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { logger.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> logger.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { logger.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } } /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. 
Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> logger.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { logger.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } } /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. 
Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
This seems to be the case where the current work completes successfully. Where does the code go if the current work times out or is killed?
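(For reference, the timed-out/killed path appears to live in `getOrUpdateCurrentWork()` rather than in `drainQueue()` itself: a work item that times out or is cancelled reports `isTerminal() == true`, so the drain loop skips it the next time it asks for the current work. Below is a condensed sketch of that method, taken from the surrounding context with logging omitted; the assumption that a timeout marks the work terminal comes from `SynchronousReceiveWork`, whose implementation is not shown here.)

```java
// Condensed from getOrUpdateCurrentWork() in SynchronousMessageSubscriber (logging omitted).
// Assumption: SynchronousReceiveWork.isTerminal() becomes true once the work
// completes, times out, or is cancelled -- its implementation is not shown here.
private SynchronousReceiveWork getOrUpdateCurrentWork() {
    synchronized (currentWorkLock) {
        if (currentWork != null && !currentWork.isTerminal()) {
            return currentWork; // still active, keep using it
        }
        // Terminal items (completed, timed out, or killed) are polled off and skipped.
        currentWork = workQueue.poll();
        while (currentWork != null && currentWork.isTerminal()) {
            currentWork = workQueue.poll();
        }
        if (currentWork != null) {
            currentWork.start();                              // arms the work's own timeout
            requestUpstream(currentWork.getNumberOfEvents()); // tops up upstream credits
        }
        return currentWork;
    }
}
```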
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> logger.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { logger.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
if (numberRequested == 0L) {
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> logger.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { logger.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. 
Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { logger.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. 
Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
I assume that after `getOrUpdateCurrentWork` is called, the next work item in the queue becomes the current work and the requested count is updated; will this Subscriber then need to enter the `drain`/`drainQueue` loop again? Do we know whether this is the case, and where in the code it happens?
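(For what it's worth, the re-entry seems to come from the two callers of `drain()` shown in this class, plus the `wip` counter that replays missed passes. A sketch under that reading, condensed from the class above -- the fields referenced here belong to `SynchronousMessageSubscriber` and this is not a complete implementation:)

```java
// Sketch of how the drain loop is re-entered, condensed from SynchronousMessageSubscriber.
// (1) Every arriving message triggers a drain pass:
@Override
protected void hookOnNext(ServiceBusReceivedMessage message) {
    bufferMessages.add(message);
    drain();
}

// (2) Newly queued work triggers a drain pass once an upstream exists:
void queueWork(SynchronousReceiveWork work) {
    workQueue.add(work);
    if (UPSTREAM.get(this) != null) {
        drain();
    }
}

// (3) The wip counter makes the thread already inside drain() loop again if
// another thread called drain() concurrently, so no pass is lost:
private void drain() {
    if (wip.getAndIncrement() != 0) {
        return; // someone else is draining; they will pick up this pass
    }
    int missed = 1;
    while (missed != 0) {
        try {
            drainQueue();
        } finally {
            missed = wip.addAndGet(-missed);
        }
    }
}
```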
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> logger.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { logger.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
getOrUpdateCurrentWork();
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> logger.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { logger.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. 
Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { logger.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; logger.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. 
Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
If `configuration` is nullable, why do we have `Configuration.NONE`? How is `NONE` different from `null`?
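(One possible reading, based on how `fromConfiguration` treats the two values: `null` means "the caller expressed no preference, fall back to the global configuration", while `Configuration.NONE` is an explicit sentinel for "use no configuration sources at all". A sketch of that distinction, condensed from the methods below -- the comments are my interpretation, not documented semantics:)

```java
// Condensed from fromConfiguration/attemptToLoadProxy below; comments are interpretation.
Configuration proxyConfiguration = (configuration == null)
    ? Configuration.getGlobalConfiguration() // null: no preference, use process-wide defaults
    : configuration;

if (proxyConfiguration == Configuration.NONE) {
    // NONE: the caller explicitly opted out of every configuration source, so no
    // proxy can be loaded; the original code threw here, the revised code returns null.
    return null;
}
return attemptToLoadProxy(proxyConfiguration, createUnresolved);
```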
public static ProxyOptions fromConfiguration(Configuration configuration) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; boolean createUnresolved = proxyConfiguration.get(Properties.CREATE_UNRESOLVED); return attemptToLoadProxy(proxyConfiguration, createUnresolved); }
if (configuration == Configuration.NONE) {
public static ProxyOptions fromConfiguration(Configuration configuration) { return fromConfiguration(configuration, false); }
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'."; private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether system proxy configurations (HTTPS_PROXY, HTTP_PROXY) are allowed to be used. */ private static final String JAVA_SYSTEM_PROXY_PREREQUISITE = "java.net.useSystemProxies"; private static final int DEFAULT_HTTPS_PORT = 443; private static final int DEFAULT_HTTP_PORT = 80; /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|', but don't split escaped '|'s. */ private static final Pattern HTTP_NON_PROXY_HOSTS_SPLIT = Pattern.compile("(?<!\\\\)\\|"); /* * The 'NO_PROXY' environment variable is expected to be delimited by ',', but don't split escaped ','s. */ private static final Pattern NO_PROXY_SPLIT = Pattern.compile("(?<!\\\\),"); private static final Pattern UNESCAPED_PERIOD = Pattern.compile("(?<!\\\\)\\."); private static final Pattern ANY = Pattern.compile("\\*"); private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. * * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. * * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. * Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded the proxy address is DNS resolved. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * * <p> * {@code null} will be returned if no proxy was found in the environment. 
* * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded, the proxy address is DNS resolved based on {@code createUnresolved}. When {@code * createUnresolved} is true resolving {@link * calls. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @param createUnresolved Flag determining whether the returned {@link ProxyOptions} is unresolved. * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration, boolean createUnresolved) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; return attemptToLoadProxy(proxyConfiguration, createUnresolved); } private static ProxyOptions attemptToLoadProxy(Configuration configuration, boolean createUnresolved) { ProxyOptions proxyOptions; if (Boolean.parseBoolean(configuration.get(JAVA_SYSTEM_PROXY_PREREQUISITE))) { proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTPS_PROXY); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from HTTPS_PROXY environment variable."); return proxyOptions; } proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTP_PROXY); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from HTTP_PROXY environment variable."); return proxyOptions; } } proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, Properties.HTTPS_PROXY_HOST, Properties.HTTPS_PROXY_PORT, Properties.HTTPS_PROXY_USER, Properties.HTTPS_PROXY_PASSWORD, DEFAULT_HTTPS_PORT); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from JVM HTTPS system properties."); return proxyOptions; } proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, Properties.HTTP_PROXY_HOST, Properties.HTTP_PROXY_PORT, Properties.HTTP_PROXY_USER, Properties.HTTP_PROXY_PASSWORD, DEFAULT_HTTP_PORT); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from JVM HTTP system properties."); return proxyOptions; } return null; } private static ProxyOptions attemptToLoadSystemProxy(Configuration configuration, boolean createUnresolved, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? proxyUrl.getDefaultPort() : proxyUrl.getPort(); InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(proxyUrl.getHost(), port) : new InetSocketAddress(proxyUrl.getHost(), port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); LOGGER.log(LogLevel.VERBOSE, () -> "Using non-proxy host regex: " + proxyOptions.nonProxyHosts); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. 
*/ static String sanitizeNoProxy(String noProxyString) { return sanitizeNonProxyHosts(NO_PROXY_SPLIT.split(noProxyString)); } private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, boolean createUnresolved, ConfigurationProperty<String> hostProperty, ConfigurationProperty<Integer> portProperty, ConfigurationProperty<String> userProperty, ConfigurationProperty<String> passwordProperty, Integer defaultPort) { String host = configuration.get(hostProperty); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = configuration.get(portProperty); } catch (NumberFormatException ex) { port = defaultPort; } InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(host, port) : new InetSocketAddress(host, port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(Properties.NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHostsString); LOGGER.log(LogLevel.VERBOSE, () -> "Using non-proxy host regex: " + proxyOptions.nonProxyHosts); } String username = configuration.get(userProperty); String password = configuration.get(passwordProperty); if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { return sanitizeNonProxyHosts(HTTP_NON_PROXY_HOSTS_SPLIT.split(nonProxyHostsString)); } private static String sanitizeNonProxyHosts(String[] nonProxyHosts) { StringBuilder sanitizedBuilder = new StringBuilder(); for (int i = 0; i < nonProxyHosts.length; i++) { if (i > 0) { sanitizedBuilder.append("|"); } String prefixWildcard = ""; String suffixWildcard = ""; String sanitizedNonProxyHost = nonProxyHosts[i]; /* * If the non-proxy host begins with either '.', '*', '.*', or any of the previous with a trailing '?' * substring the non-proxy host and set the wildcard prefix. */ if (sanitizedNonProxyHost.startsWith(".")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(1); } else if (sanitizedNonProxyHost.startsWith(".?")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(2); } else if (sanitizedNonProxyHost.startsWith("*?")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(2); } else if (sanitizedNonProxyHost.startsWith("*")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(1); } else if (sanitizedNonProxyHost.startsWith(".*?")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(3); } else if (sanitizedNonProxyHost.startsWith(".*")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(2); } /* * Same with the ending of the non-proxy host, if it has a suffix wildcard trim the non-proxy host and * retain the suffix wildcard. 
*/ if (sanitizedNonProxyHost.endsWith(".")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 2); } else if (sanitizedNonProxyHost.endsWith(".?")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 3); } else if (sanitizedNonProxyHost.endsWith("*?")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 3); } else if (sanitizedNonProxyHost.endsWith("*")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 2); } else if (sanitizedNonProxyHost.endsWith(".*?")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 4); } else if (sanitizedNonProxyHost.endsWith(".*")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 3); } try { String attemptToSanitizeAsRegex = sanitizedNonProxyHost; attemptToSanitizeAsRegex = UNESCAPED_PERIOD.matcher(attemptToSanitizeAsRegex).replaceAll("\\\\."); attemptToSanitizeAsRegex = ANY.matcher(attemptToSanitizeAsRegex).replaceAll("\\.*?"); sanitizedNonProxyHost = Pattern.compile(attemptToSanitizeAsRegex).pattern(); } catch (PatternSyntaxException ex) { /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' * where without quoting the '.' in the string would be treated as the match any character instead of * the literal '.' character. */ sanitizedNonProxyHost = Pattern.quote(sanitizedNonProxyHost); } sanitizedBuilder.append("(") .append(prefixWildcard) .append(sanitizedNonProxyHost) .append(suffixWildcard) .append(")"); } return sanitizedBuilder.toString(); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. 
* * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } private static class Properties { public static final ConfigurationProperty<String> NO_PROXY = ConfigurationProperty.stringPropertyBuilder("http.proxy.non-proxy-hosts") .environmentAliases("http.nonProxyHosts", Configuration.PROPERTY_NO_PROXY) .canLogValue(true) .build(); public static final ConfigurationProperty<Boolean> CREATE_UNRESOLVED = ConfigurationUtils.booleanSharedPropertyBuilder("http.proxy.create-unresolved") .defaultValue(false) .build(); public static final ConfigurationProperty<String> HTTP_PROXY_HOST = ConfigurationUtils.stringSharedPropertyBuilder("http.proxy.host") .environmentAliases("http.proxyHost") .canLogValue(true) .build(); public static final ConfigurationProperty<Integer> HTTP_PROXY_PORT = ConfigurationUtils.integerSharedPropertyBuilder("http.proxy.port") .environmentAliases("http.proxyPort") .defaultValue(DEFAULT_HTTP_PORT) .build(); public static final ConfigurationProperty<String> HTTP_PROXY_USER = ConfigurationUtils.stringSharedPropertyBuilder("http.proxy.username") .environmentAliases("http.proxyUser") .build(); public static final ConfigurationProperty<String> HTTP_PROXY_PASSWORD = ConfigurationUtils.stringSharedPropertyBuilder("http.proxy.password") .environmentAliases("http.proxyPassword") .build(); public static final ConfigurationProperty<String> HTTPS_PROXY_HOST = ConfigurationUtils.stringSharedPropertyBuilder("https.proxy.host") .environmentAliases("https.proxyHost") .canLogValue(true) .build(); public static final ConfigurationProperty<Integer> HTTPS_PROXY_PORT = ConfigurationUtils.integerSharedPropertyBuilder("https.proxy.port") .environmentAliases("https.proxyPort") .defaultValue(DEFAULT_HTTPS_PORT) .build(); public static final ConfigurationProperty<String> HTTPS_PROXY_USER = ConfigurationUtils.stringSharedPropertyBuilder("https.proxy.username") .environmentAliases("https.proxyUser") .build(); public static final ConfigurationProperty<String> HTTPS_PROXY_PASSWORD = ConfigurationUtils.stringSharedPropertyBuilder("https.proxy.password") .environmentAliases("https.proxyPassword") .build(); } }
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether system proxy configurations (HTTPS_PROXY, HTTP_PROXY) are allowed to be used. */ private static final String JAVA_SYSTEM_PROXY_PREREQUISITE = "java.net.useSystemProxies"; /* * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and * 'http', the exception is 'http.nonProxyHosts' as it is used for both. */ private static final String JAVA_PROXY_HOST = "proxyHost"; private static final String JAVA_PROXY_PORT = "proxyPort"; private static final String JAVA_PROXY_USER = "proxyUser"; private static final String JAVA_PROXY_PASSWORD = "proxyPassword"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String HTTPS = "https"; private static final int DEFAULT_HTTPS_PORT = 443; private static final String HTTP = "http"; private static final int DEFAULT_HTTP_PORT = 80; /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|', but don't split escaped '|'s. */ private static final Pattern HTTP_NON_PROXY_HOSTS_SPLIT = Pattern.compile("(?<!\\\\)\\|"); /* * The 'NO_PROXY' environment variable is expected to be delimited by ',', but don't split escaped ','s. */ private static final Pattern NO_PROXY_SPLIT = Pattern.compile("(?<!\\\\),"); private static final Pattern UNESCAPED_PERIOD = Pattern.compile("(?<!\\\\)\\."); private static final Pattern ANY = Pattern.compile("\\*"); private static final ConfigurationProperty<String> NON_PROXY_PROPERTY = ConfigurationPropertyBuilder.ofString(ConfigurationProperties.HTTP_PROXY_NON_PROXY_HOSTS) .shared(true) .logValue(true) .build(); private static final ConfigurationProperty<String> HOST_PROPERTY = ConfigurationPropertyBuilder.ofString(ConfigurationProperties.HTTP_PROXY_HOST) .shared(true) .logValue(true) .build(); private static final ConfigurationProperty<Integer> PORT_PROPERTY = ConfigurationPropertyBuilder.ofInteger(ConfigurationProperties.HTTP_PROXY_PORT) .shared(true) .defaultValue(DEFAULT_HTTPS_PORT) .build(); private static final ConfigurationProperty<String> USER_PROPERTY = ConfigurationPropertyBuilder.ofString(ConfigurationProperties.HTTP_PROXY_USER) .shared(true) .logValue(true) .build(); private static final ConfigurationProperty<String> PASSWORD_PROPERTY = ConfigurationPropertyBuilder.ofString(ConfigurationProperties.HTTP_PROXY_PASSWORD) .shared(true) .build(); private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. * * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. * * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. 
* Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the configuration. * <p> * If a proxy is found and loaded the proxy address is DNS resolved. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. */ /** * Attempts to load a proxy from the environment. * <p> * If a proxy is found and loaded, the proxy address is DNS resolved based on {@code createUnresolved}. When {@code * createUnresolved} is true resolving {@link * calls. * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @param createUnresolved Flag determining whether the returned {@link ProxyOptions} is unresolved. * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. */ public static ProxyOptions fromConfiguration(Configuration configuration, boolean createUnresolved) { Configuration proxyConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; return attemptToLoadProxy(proxyConfiguration, createUnresolved); } private static ProxyOptions attemptToLoadProxy(Configuration configuration, boolean createUnresolved) { if (configuration == Configuration.NONE) { return null; } ProxyOptions proxyOptions = null; if (Boolean.parseBoolean(configuration.get(JAVA_SYSTEM_PROXY_PREREQUISITE))) { proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTPS_PROXY); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from HTTPS_PROXY environment variable."); return proxyOptions; } proxyOptions = attemptToLoadSystemProxy(configuration, createUnresolved, Configuration.PROPERTY_HTTP_PROXY); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from HTTP_PROXY environment variable."); return proxyOptions; } } proxyOptions = attemptToLoadAzureSdkProxy(configuration, createUnresolved); if (proxyOptions != null) { return proxyOptions; } proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, HTTPS); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from JVM HTTPS system properties."); return proxyOptions; } proxyOptions = attemptToLoadJavaProxy(configuration, createUnresolved, HTTP); if (proxyOptions != null) { LOGGER.verbose("Using proxy created from JVM HTTP system properties."); return proxyOptions; } return null; } private static ProxyOptions attemptToLoadSystemProxy(Configuration configuration, boolean createUnresolved, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? proxyUrl.getDefaultPort() : proxyUrl.getPort(); InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(proxyUrl.getHost(), port) : new InetSocketAddress(proxyUrl.getHost(), port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); LOGGER.log(LogLevel.VERBOSE, () -> "Using non-proxy host regex: " + proxyOptions.nonProxyHosts); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. */ static String sanitizeNoProxy(String noProxyString) { return sanitizeNonProxyHosts(NO_PROXY_SPLIT.split(noProxyString)); } private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, boolean createUnresolved, String type) { String host = configuration.get(type + "." + JAVA_PROXY_HOST); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT)); } catch (NumberFormatException ex) { port = HTTPS.equals(type) ? 
DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS); String username = configuration.get(type + "." + JAVA_PROXY_USER); String password = configuration.get(type + "." + JAVA_PROXY_PASSWORD); return createOptions(host, port, nonProxyHostsString, username, password, createUnresolved); } private static ProxyOptions attemptToLoadAzureSdkProxy(Configuration configuration, boolean createUnresolved) { String host = configuration.get(HOST_PROPERTY); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port = configuration.get(PORT_PROPERTY); String nonProxyHostsString = configuration.get(NON_PROXY_PROPERTY); String username = configuration.get(USER_PROPERTY); String password = configuration.get(PASSWORD_PROPERTY); return createOptions(host, port, nonProxyHostsString, username, password, createUnresolved); } private static ProxyOptions createOptions(String host, int port, String nonProxyHostsString, String username, String password, boolean createUnresolved) { InetSocketAddress socketAddress = (createUnresolved) ? InetSocketAddress.createUnresolved(host, port) : new InetSocketAddress(host, port); ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, socketAddress); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHostsString); LOGGER.log(LogLevel.VERBOSE, () -> "Using non-proxy host regex: " + proxyOptions.nonProxyHosts); } if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { return sanitizeNonProxyHosts(HTTP_NON_PROXY_HOSTS_SPLIT.split(nonProxyHostsString)); } private static String sanitizeNonProxyHosts(String[] nonProxyHosts) { StringBuilder sanitizedBuilder = new StringBuilder(); for (int i = 0; i < nonProxyHosts.length; i++) { if (i > 0) { sanitizedBuilder.append("|"); } String prefixWildcard = ""; String suffixWildcard = ""; String sanitizedNonProxyHost = nonProxyHosts[i]; /* * If the non-proxy host begins with either '.', '*', '.*', or any of the previous with a trailing '?' * substring the non-proxy host and set the wildcard prefix. */ if (sanitizedNonProxyHost.startsWith(".")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(1); } else if (sanitizedNonProxyHost.startsWith(".?")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(2); } else if (sanitizedNonProxyHost.startsWith("*?")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(2); } else if (sanitizedNonProxyHost.startsWith("*")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(1); } else if (sanitizedNonProxyHost.startsWith(".*?")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(3); } else if (sanitizedNonProxyHost.startsWith(".*")) { prefixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(2); } /* * Same with the ending of the non-proxy host, if it has a suffix wildcard trim the non-proxy host and * retain the suffix wildcard. 
*/ if (sanitizedNonProxyHost.endsWith(".")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 2); } else if (sanitizedNonProxyHost.endsWith(".?")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 3); } else if (sanitizedNonProxyHost.endsWith("*?")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 3); } else if (sanitizedNonProxyHost.endsWith("*")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 2); } else if (sanitizedNonProxyHost.endsWith(".*?")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 4); } else if (sanitizedNonProxyHost.endsWith(".*")) { suffixWildcard = ".*?"; sanitizedNonProxyHost = sanitizedNonProxyHost.substring(0, sanitizedNonProxyHost.length() - 3); } try { String attemptToSanitizeAsRegex = sanitizedNonProxyHost; attemptToSanitizeAsRegex = UNESCAPED_PERIOD.matcher(attemptToSanitizeAsRegex).replaceAll("\\\\."); attemptToSanitizeAsRegex = ANY.matcher(attemptToSanitizeAsRegex).replaceAll("\\.*?"); sanitizedNonProxyHost = Pattern.compile(attemptToSanitizeAsRegex).pattern(); } catch (PatternSyntaxException ex) { /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' * where without quoting the '.' in the string would be treated as the match any character instead of * the literal '.' character. */ sanitizedNonProxyHost = Pattern.quote(sanitizedNonProxyHost); } sanitizedBuilder.append("(") .append(prefixWildcard) .append(sanitizedNonProxyHost) .append(suffixWildcard) .append(")"); } return sanitizedBuilder.toString(); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. * * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } /** * Lists available configuration property names for HTTP {@link ProxyOptions}. */ private static class ConfigurationProperties { /** * Represents a list of hosts that should be reached directly, bypassing the proxy. * This is a list of patterns separated by '|'. The patterns may start or end with a '*' for wildcards. * Any host matching one of these patterns will be reached through a direct connection instead of through a proxy. * <p> * Default value is {@code null} */ public static final String HTTP_PROXY_NON_PROXY_HOSTS = "http.proxy.non-proxy-hosts"; /** * The HTTP host name of the proxy server. * <p> * Default value is {@code null}. */ public static final String HTTP_PROXY_HOST = "http.proxy.hostname"; /** * The port number of the proxy server. * <p> * Default value is {@code 443}. */ public static final String HTTP_PROXY_PORT = "http.proxy.port"; /** * The HTTP proxy server user. * Default value is {@code null}. */ public static final String HTTP_PROXY_USER = "http.proxy.username"; /** * The HTTP proxy server password. * Default value is {@code null}. */ public static final String HTTP_PROXY_PASSWORD = "http.proxy.password"; } }
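The wildcard handling in sanitizeNonProxyHosts is easier to see in action. Below is a minimal sketch, assuming the ProxyOptions class above is on the classpath; the proxy endpoint and host names are hypothetical.

import java.net.InetSocketAddress;
import java.util.regex.Pattern;

public class NonProxyHostsDemo {
    public static void main(String[] args) {
        // Hypothetical proxy endpoint; only the non-proxy-host handling matters here.
        ProxyOptions options = new ProxyOptions(ProxyOptions.Type.HTTP,
                InetSocketAddress.createUnresolved("proxy.internal", 8888))
            .setNonProxyHosts("localhost|*.example.com");

        // The setter stores a Pattern-safe form, e.g. "(localhost)|(.*?\.example\.com)".
        Pattern bypass = Pattern.compile(options.getNonProxyHosts());

        System.out.println(bypass.matcher("localhost").matches());        // true  -> bypasses proxy
        System.out.println(bypass.matcher("api.example.com").matches());  // true  -> bypasses proxy
        System.out.println(bypass.matcher("example.org").matches());      // false -> goes through proxy
    }
}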
`StandardCharsets.UTF_8.name()` is more durable than a String constant
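For illustration, a minimal sketch of the two approaches this comment contrasts; the demo class and method names are hypothetical.

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

public class CharsetNameDemo {
    // Fragile: a bare string literal is only validated at runtime and can
    // silently drift out of shape ("UTF8", "utf-8 ", ...).
    static String decodeWithLiteral(String s) throws UnsupportedEncodingException {
        return URLDecoder.decode(s, "UTF-8");
    }

    // More durable: StandardCharsets.UTF_8 is a compile-checked constant that
    // names a charset every JVM is required to support.
    static String decodeWithConstant(String s) throws UnsupportedEncodingException {
        return URLDecoder.decode(s, StandardCharsets.UTF_8.name());
    }
}

On Java 10 and later, the URLDecoder.decode(String, Charset) overload accepts the Charset directly and removes the checked UnsupportedEncodingException altogether.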
private static String decode(final String stringToDecode) { try { return URLDecoder.decode(stringToDecode, StandardCharsets.UTF_8.name()); } catch (UnsupportedEncodingException ex) { throw new RuntimeException(ex); } }
return URLDecoder.decode(stringToDecode, StandardCharsets.UTF_8.name());
private static String decode(final String stringToDecode) { try { return URLDecoder.decode(stringToDecode, StandardCharsets.UTF_8.name()); } catch (UnsupportedEncodingException ex) { throw new RuntimeException(ex); } }
class Utility { private static final ClientLogger LOGGER = new ClientLogger(Utility.class); /** * Please see <a href=https: * for more information on Azure resource provider namespaces. */ public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage"; /** * Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than * replacing it with a space character. * * @param stringToDecode String value to decode * @return the decoded string value * @throws RuntimeException If the UTF-8 charset isn't supported */ public static String urlDecode(final String stringToDecode) { if (CoreUtils.isNullOrEmpty(stringToDecode)) { return ""; } if (stringToDecode.contains("+")) { StringBuilder outBuilder = new StringBuilder(); int startDex = 0; for (int m = 0; m < stringToDecode.length(); m++) { if (stringToDecode.charAt(m) == '+') { if (m > startDex) { outBuilder.append(decode(stringToDecode.substring(startDex, m))); } outBuilder.append("+"); startDex = m + 1; } } if (startDex != stringToDecode.length()) { outBuilder.append(decode(stringToDecode.substring(startDex))); } return outBuilder.toString(); } else { return decode(stringToDecode); } } /* * Helper method to reduce duplicate calls of URLDecoder.decode */ /** * Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of * inserting the {@code +} character. * * @param stringToEncode String value to encode * @return the encoded string value * @throws RuntimeException If the UTF-8 charset ins't supported */ public static String urlEncode(final String stringToEncode) { if (stringToEncode == null) { return null; } if (stringToEncode.length() == 0) { return ""; } if (stringToEncode.contains(" ")) { StringBuilder outBuilder = new StringBuilder(); int startDex = 0; for (int m = 0; m < stringToEncode.length(); m++) { if (stringToEncode.charAt(m) == ' ') { if (m > startDex) { outBuilder.append(encode(stringToEncode.substring(startDex, m))); } outBuilder.append("%20"); startDex = m + 1; } } if (startDex != stringToEncode.length()) { outBuilder.append(encode(stringToEncode.substring(startDex))); } return outBuilder.toString(); } else { return encode(stringToEncode); } } /* * Helper method to reduce duplicate calls of URLEncoder.encode */ private static String encode(final String stringToEncode) { try { return URLEncoder.encode(stringToEncode, StandardCharsets.UTF_8.name()); } catch (UnsupportedEncodingException ex) { throw new RuntimeException(ex); } } /** * Performs a safe encoding of a url string, only encoding the path. * * @param url The url to encode. * @return The encoded url. */ public static String encodeUrlPath(String url) { /* Deconstruct the URL and reconstruct it making sure the path is encoded. */ UrlBuilder builder = UrlBuilder.parse(url); String path = builder.getPath(); if (path.startsWith("/")) { path = path.substring(1); } path = Utility.urlEncode(Utility.urlDecode(path)); builder.setPath(path); return builder.toString(); } /** * Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to * millisecond precision. 
* * @param dateString the {@code String} to be interpreted as a <code>Date</code> * @return the corresponding <code>Date</code> object * @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern * @deprecated Use {@link StorageImplUtils */ @Deprecated public static OffsetDateTime parseDate(String dateString) { return StorageImplUtils.parseDateAndFormat(dateString).getDateTime(); } /** * A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length * and the input length. * * @param data The input data which needs to convert to ByteBuffer. * @param length The expected input data length. * @param blockSize The size of each ByteBuffer. * @return {@link ByteBuffer} which contains the input data. * @throws UnexpectedLengthException when input data length mismatch input length. * @throws RuntimeException When I/O error occurs. */ public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) { return convertStreamToByteBuffer(data, length, blockSize, true); } /** * A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length * and the input length. * * Using markAndReset=true to force a seekable stream implies a buffering strategy is not being used, in which case * length is still needed for whatever underlying REST call is being streamed to. If markAndReset=false and data is * being buffered, consider using {@link com.azure.core.util.FluxUtil * does not require a data length. * * @param data The input data which needs to convert to ByteBuffer. * @param length The expected input data length. * @param blockSize The size of each ByteBuffer. * @param markAndReset Whether the stream needs to be marked and reset. This should generally always be true to * support retries. It is false in the case of buffered upload to support non markable streams because buffered * upload uses its own mechanisms to support retries. * @return {@link ByteBuffer} which contains the input data. * @throws UnexpectedLengthException when input data length mismatch input length. * @throws RuntimeException When I/O error occurs. */ public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize, boolean markAndReset) { if (markAndReset) { data.mark(Integer.MAX_VALUE); } if (length == 0) { try { if (data.read() != -1) { long totalLength = 1 + data.available(); throw LOGGER.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes, more than the expected %d bytes.", totalLength, length), totalLength, length)); } } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurred", e)); } } return Flux.defer(() -> { /* If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be reset in order to correctly return the same data again. */ final long[] currentTotalLength = new long[1]; if (markAndReset) { try { data.reset(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize)) .map(i -> i * blockSize) .concatMap(pos -> Mono.fromCallable(() -> { long count = pos + blockSize > length ? 
length - pos : blockSize; byte[] cache = new byte[(int) count]; int numOfBytes = 0; int offset = 0; int len = (int) count; while (numOfBytes != -1 && offset < count) { numOfBytes = data.read(cache, offset, len); if (numOfBytes != -1) { offset += numOfBytes; len -= numOfBytes; currentTotalLength[0] += numOfBytes; } } if (numOfBytes == -1 && currentTotalLength[0] < length) { throw LOGGER.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes, less than the expected %d bytes.", currentTotalLength[0], length), currentTotalLength[0], length)); } if (currentTotalLength[0] >= length) { try { if (data.read() != -1) { long totalLength = 1 + currentTotalLength[0] + data.available(); throw LOGGER.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes, more than the expected %d bytes.", totalLength, length), totalLength, length)); } else if (currentTotalLength[0] > length) { throw LOGGER.logExceptionAsError(new IllegalStateException( String.format("Read more data than was requested. Size of data read: %d. Size of data" + " requested: %d", currentTotalLength[0], length))); } } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurred", e)); } } return ByteBuffer.wrap(cache, 0, offset); })); }); } /** * Appends a query parameter to a url. * * @param url The url. * @param key The query key. * @param value The query value. * @return The updated url. */ public static String appendQueryParameter(String url, String key, String value) { if (url.contains("?")) { url = String.format("%s&%s=%s", url, key, value); } else { url = String.format("%s?%s=%s", url, key, value); } return url; } }
class Utility { private static final ClientLogger LOGGER = new ClientLogger(Utility.class); /** * Please see <a href=https: * for more information on Azure resource provider namespaces. */ public static final String STORAGE_TRACING_NAMESPACE_VALUE = "Microsoft.Storage"; /** * Performs a safe decoding of the passed string, taking care to preserve each {@code +} character rather than * replacing it with a space character. * * @param stringToDecode String value to decode * @return the decoded string value * @throws RuntimeException If the UTF-8 charset isn't supported */ public static String urlDecode(final String stringToDecode) { if (CoreUtils.isNullOrEmpty(stringToDecode)) { return ""; } if (stringToDecode.contains("+")) { StringBuilder outBuilder = new StringBuilder(); int startDex = 0; for (int m = 0; m < stringToDecode.length(); m++) { if (stringToDecode.charAt(m) == '+') { if (m > startDex) { outBuilder.append(decode(stringToDecode.substring(startDex, m))); } outBuilder.append("+"); startDex = m + 1; } } if (startDex != stringToDecode.length()) { outBuilder.append(decode(stringToDecode.substring(startDex))); } return outBuilder.toString(); } else { return decode(stringToDecode); } } /* * Helper method to reduce duplicate calls of URLDecoder.decode */ /** * Performs a safe encoding of the specified string, taking care to insert %20 for each space character instead of * inserting the {@code +} character. * * @param stringToEncode String value to encode * @return the encoded string value * @throws RuntimeException If the UTF-8 charset ins't supported */ public static String urlEncode(final String stringToEncode) { if (stringToEncode == null) { return null; } if (stringToEncode.length() == 0) { return ""; } if (stringToEncode.contains(" ")) { StringBuilder outBuilder = new StringBuilder(); int startDex = 0; for (int m = 0; m < stringToEncode.length(); m++) { if (stringToEncode.charAt(m) == ' ') { if (m > startDex) { outBuilder.append(encode(stringToEncode.substring(startDex, m))); } outBuilder.append("%20"); startDex = m + 1; } } if (startDex != stringToEncode.length()) { outBuilder.append(encode(stringToEncode.substring(startDex))); } return outBuilder.toString(); } else { return encode(stringToEncode); } } /* * Helper method to reduce duplicate calls of URLEncoder.encode */ private static String encode(final String stringToEncode) { try { return URLEncoder.encode(stringToEncode, StandardCharsets.UTF_8.name()); } catch (UnsupportedEncodingException ex) { throw new RuntimeException(ex); } } /** * Performs a safe encoding of a url string, only encoding the path. * * @param url The url to encode. * @return The encoded url. */ public static String encodeUrlPath(String url) { /* Deconstruct the URL and reconstruct it making sure the path is encoded. */ UrlBuilder builder = UrlBuilder.parse(url); String path = builder.getPath(); if (path.startsWith("/")) { path = path.substring(1); } path = Utility.urlEncode(Utility.urlDecode(path)); builder.setPath(path); return builder.toString(); } /** * Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to * millisecond precision. 
* * @param dateString the {@code String} to be interpreted as a <code>Date</code> * @return the corresponding <code>Date</code> object * @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern * @deprecated Use {@link StorageImplUtils */ @Deprecated public static OffsetDateTime parseDate(String dateString) { return StorageImplUtils.parseDateAndFormat(dateString).getDateTime(); } /** * A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length * and the input length. * * @param data The input data which needs to convert to ByteBuffer. * @param length The expected input data length. * @param blockSize The size of each ByteBuffer. * @return {@link ByteBuffer} which contains the input data. * @throws UnexpectedLengthException when input data length mismatch input length. * @throws RuntimeException When I/O error occurs. */ public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize) { return convertStreamToByteBuffer(data, length, blockSize, true); } /** * A utility method for converting the input stream to Flux of ByteBuffer. Will check the equality of entity length * and the input length. * * Using markAndReset=true to force a seekable stream implies a buffering strategy is not being used, in which case * length is still needed for whatever underlying REST call is being streamed to. If markAndReset=false and data is * being buffered, consider using {@link com.azure.core.util.FluxUtil * does not require a data length. * * @param data The input data which needs to convert to ByteBuffer. * @param length The expected input data length. * @param blockSize The size of each ByteBuffer. * @param markAndReset Whether the stream needs to be marked and reset. This should generally always be true to * support retries. It is false in the case of buffered upload to support non markable streams because buffered * upload uses its own mechanisms to support retries. * @return {@link ByteBuffer} which contains the input data. * @throws UnexpectedLengthException when input data length mismatch input length. * @throws RuntimeException When I/O error occurs. */ public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data, long length, int blockSize, boolean markAndReset) { if (markAndReset) { data.mark(Integer.MAX_VALUE); } if (length == 0) { try { if (data.read() != -1) { long totalLength = 1 + data.available(); throw LOGGER.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes, more than the expected %d bytes.", totalLength, length), totalLength, length)); } } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurred", e)); } } return Flux.defer(() -> { /* If the request needs to be retried, the flux will be resubscribed to. The stream and counter must be reset in order to correctly return the same data again. */ final long[] currentTotalLength = new long[1]; if (markAndReset) { try { data.reset(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } return Flux.range(0, (int) Math.ceil((double) length / (double) blockSize)) .map(i -> i * blockSize) .concatMap(pos -> Mono.fromCallable(() -> { long count = pos + blockSize > length ? 
length - pos : blockSize; byte[] cache = new byte[(int) count]; int numOfBytes = 0; int offset = 0; int len = (int) count; while (numOfBytes != -1 && offset < count) { numOfBytes = data.read(cache, offset, len); if (numOfBytes != -1) { offset += numOfBytes; len -= numOfBytes; currentTotalLength[0] += numOfBytes; } } if (numOfBytes == -1 && currentTotalLength[0] < length) { throw LOGGER.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes, less than the expected %d bytes.", currentTotalLength[0], length), currentTotalLength[0], length)); } if (currentTotalLength[0] >= length) { try { if (data.read() != -1) { long totalLength = 1 + currentTotalLength[0] + data.available(); throw LOGGER.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes, more than the expected %d bytes.", totalLength, length), totalLength, length)); } else if (currentTotalLength[0] > length) { throw LOGGER.logExceptionAsError(new IllegalStateException( String.format("Read more data than was requested. Size of data read: %d. Size of data" + " requested: %d", currentTotalLength[0], length))); } } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException("I/O errors occurred", e)); } } return ByteBuffer.wrap(cache, 0, offset); })); }); } /** * Appends a query parameter to a url. * * @param url The url. * @param key The query key. * @param value The query value. * @return The updated url. */ public static String appendQueryParameter(String url, String key, String value) { if (url.contains("?")) { url = String.format("%s&%s=%s", url, key, value); } else { url = String.format("%s?%s=%s", url, key, value); } return url; } }
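A quick usage sketch of the '+'-preserving decode and %20 space encoding implemented above, assuming the Utility class shown is available.

public class UrlCodecDemo {
    public static void main(String[] args) {
        // An encoded '+' (%2B) decodes to '+', and a literal '+' is preserved
        // instead of being turned into a space as plain URLDecoder would do.
        System.out.println(Utility.urlDecode("a%2Bb+c")); // prints "a+b+c"

        // A space encodes to %20 instead of the '+' plain URLEncoder would emit.
        System.out.println(Utility.urlEncode("a b"));      // prints "a%20b"
    }
}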
Do we need to test the case of disabling auto-complete here?
void customizeAutoComplete() { consumerProperties.setAutoComplete(true); assertTrue(consumerProperties.getAutoComplete()); }
consumerProperties.setAutoComplete(true);
void customizeAutoComplete() { consumerProperties.setAutoComplete(false); assertFalse(consumerProperties.getAutoComplete()); }
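One way to cover both the enabled and disabled paths in a single test is a parameterized test; this is a sketch assuming JUnit 5's junit-jupiter-params is on the test classpath, reusing the consumerProperties fixture created in beforeEach().

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import static org.junit.jupiter.api.Assertions.assertEquals;

@ParameterizedTest
@ValueSource(booleans = {true, false})
void customizeAutoComplete(boolean autoComplete) {
    // Exercises both enabling and disabling auto-complete.
    consumerProperties.setAutoComplete(autoComplete);
    assertEquals(autoComplete, consumerProperties.getAutoComplete());
}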
class ServiceBusConsumerPropertiesTests { private ServiceBusConsumerProperties consumerProperties; @BeforeEach void beforeEach() { consumerProperties = new ServiceBusConsumerProperties(); } @Test void autoCompleteDefaultTrue() { assertTrue(consumerProperties.getAutoComplete()); } @Test void requeueRejectedDefaultsToFalse() { assertFalse(consumerProperties.isRequeueRejected()); } @Test void customRequeueRejected() { consumerProperties.setRequeueRejected(true); assertTrue(consumerProperties.isRequeueRejected()); } @Test void maxConcurrentCallsDefaults() { assertEquals(1, consumerProperties.getMaxConcurrentCalls()); } @Test void customMaxConcurrentCalls() { consumerProperties.setMaxConcurrentCalls(10); assertEquals(10, consumerProperties.getMaxConcurrentCalls()); } @Test void maxConcurrentSessionsDefaults() { assertNull(consumerProperties.getMaxConcurrentSessions()); } @Test void customMaxConcurrentSessions() { consumerProperties.setMaxConcurrentSessions(10); assertEquals(10, consumerProperties.getMaxConcurrentSessions()); } @Test void subQueueDefaults() { assertNotNull(consumerProperties.getSubQueue()); } @Test void customSubQueue() { consumerProperties.setSubQueue(SubQueue.DEAD_LETTER_QUEUE); assertEquals(SubQueue.DEAD_LETTER_QUEUE, consumerProperties.getSubQueue()); } @Test void receiveModeDefaults() { assertEquals(ServiceBusReceiveMode.PEEK_LOCK, consumerProperties.getReceiveMode()); } @Test void customReceiveMode() { consumerProperties.setReceiveMode(ServiceBusReceiveMode.RECEIVE_AND_DELETE); assertEquals(ServiceBusReceiveMode.RECEIVE_AND_DELETE, consumerProperties.getReceiveMode()); } @Test void maxAutoLockRenewDurationDefaults() { assertEquals(Duration.ofMinutes(5), consumerProperties.getMaxAutoLockRenewDuration()); } @Test void customMaxAutoLockRenewDuration() { Duration duration = Duration.ofMinutes(6); consumerProperties.setMaxAutoLockRenewDuration(duration); assertEquals(duration, consumerProperties.getMaxAutoLockRenewDuration()); } }
class ServiceBusConsumerPropertiesTests { private ServiceBusConsumerProperties consumerProperties; @BeforeEach void beforeEach() { consumerProperties = new ServiceBusConsumerProperties(); } @Test void autoCompleteDefaultTrue() { assertTrue(consumerProperties.getAutoComplete()); } @Test void requeueRejectedDefaultsToFalse() { assertFalse(consumerProperties.isRequeueRejected()); } @Test void customRequeueRejected() { consumerProperties.setRequeueRejected(true); assertTrue(consumerProperties.isRequeueRejected()); } @Test void maxConcurrentCallsDefaults() { assertEquals(1, consumerProperties.getMaxConcurrentCalls()); } @Test void customMaxConcurrentCalls() { consumerProperties.setMaxConcurrentCalls(10); assertEquals(10, consumerProperties.getMaxConcurrentCalls()); } @Test void maxConcurrentSessionsDefaults() { assertNull(consumerProperties.getMaxConcurrentSessions()); } @Test void customMaxConcurrentSessions() { consumerProperties.setMaxConcurrentSessions(10); assertEquals(10, consumerProperties.getMaxConcurrentSessions()); } @Test void subQueueDefaults() { assertNotNull(consumerProperties.getSubQueue()); } @Test void customSubQueue() { consumerProperties.setSubQueue(SubQueue.DEAD_LETTER_QUEUE); assertEquals(SubQueue.DEAD_LETTER_QUEUE, consumerProperties.getSubQueue()); } @Test void receiveModeDefaults() { assertEquals(ServiceBusReceiveMode.PEEK_LOCK, consumerProperties.getReceiveMode()); } @Test void customReceiveMode() { consumerProperties.setReceiveMode(ServiceBusReceiveMode.RECEIVE_AND_DELETE); assertEquals(ServiceBusReceiveMode.RECEIVE_AND_DELETE, consumerProperties.getReceiveMode()); } @Test void maxAutoLockRenewDurationDefaults() { assertEquals(Duration.ofMinutes(5), consumerProperties.getMaxAutoLockRenewDuration()); } @Test void customMaxAutoLockRenewDuration() { Duration duration = Duration.ofMinutes(6); consumerProperties.setMaxAutoLockRenewDuration(duration); assertEquals(duration, consumerProperties.getMaxAutoLockRenewDuration()); } }
We should call this out in the changelog.
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> overwriteCheck = Mono.empty(); BlobRequestConditions requestConditions = null; if (!overwrite) { if (UploadUtils.shouldUploadInChunks(filePath, (long) BlockBlobAsyncClient.MAX_UPLOAD_BLOB_BYTES, logger)) { overwriteCheck = exists().flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); } requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck.then(uploadFromFile(filePath, null, null, null, null, requestConditions)); } catch (RuntimeException ex) { return monoError(logger, ex); } }
(long) BlockBlobAsyncClient.MAX_UPLOAD_BLOB_BYTES, logger)) {
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { Mono<Void> overwriteCheck = Mono.empty(); BlobRequestConditions requestConditions = null; if (!overwrite) { if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.BLOB_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)) { overwriteCheck = exists().flatMap(exists -> exists ? monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); } requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck.then(uploadFromFile(filePath, null, null, null, null, requestConditions)); }
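From the caller's side the overwrite flag behaves like this; a hedged sketch assuming an already-built BlobAsyncClient named client and a hypothetical file path. Note that, per the method body above, the failure mode differs by size: files large enough to be chunked fail the client-side exists() check with an IllegalArgumentException, while smaller single-shot uploads rely on the If-None-Match ETag wildcard precondition and fail with a service-side storage error instead.

// Upload without overwriting; the Mono errors if the blob already exists.
client.uploadFromFile("data/report.csv", /* overwrite */ false)
    .doOnSuccess(ignored -> System.out.println("Uploaded."))
    .doOnError(IllegalArgumentException.class,
        e -> System.err.println("Blob already exists: " + e.getMessage()))
    .subscribe();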
class BlobAsyncClient extends BlobAsyncClientBase { /** * The block size to use if none is specified in parallel operations. */ public static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; /** * The number of buffers to use if none is specified on the buffered upload method. */ public static final int BLOB_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * If a blob is known to be greater than 100MB, using a larger block size will trigger some server-side * optimizations. If the block size is not set and the size of the blob is known to be greater than 100MB, this * value will be used. */ public static final int BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE = 8 * Constants.MB; static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(BlobAsyncClient.class); private BlockBlobAsyncClient blockBlobAsyncClient; private AppendBlobAsyncClient appendBlobAsyncClient; private PageBlobAsyncClient pageBlobAsyncClient; /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. */ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey); } /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. */ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope); } /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. 
* @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. */ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, versionId); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return A {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey(), encryptionScope, getVersionId()); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code versionId} of this blob resource. * * @param versionId the identifier for a specific version of this blob, * pass {@code null} to interact with the latest blob version. * @return A {@link BlobAsyncClient} used to interact with the specific version. */ @Override public BlobAsyncClient getVersionClient(String versionId) { return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), encryptionScope, versionId); } /** * Creates a new {@link BlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. * @return a {@link BlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public BlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link BlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link BlobAsyncClient} with the specified {@code customerProvidedKey}. 
*/ @Override public BlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} associated with this blob. * * @return A {@link AppendBlobAsyncClient} associated with this blob. */ public AppendBlobAsyncClient getAppendBlobAsyncClient() { if (appendBlobAsyncClient == null) { appendBlobAsyncClient = prepareBuilder().buildAppendBlobAsyncClient(); } return appendBlobAsyncClient; } /** * Creates a new {@link BlockBlobAsyncClient} associated with this blob. * * @return A {@link BlockBlobAsyncClient} associated with this blob. */ public BlockBlobAsyncClient getBlockBlobAsyncClient() { if (blockBlobAsyncClient == null) { blockBlobAsyncClient = prepareBuilder().buildBlockBlobAsyncClient(); } return blockBlobAsyncClient; } /** * Creates a new {@link PageBlobAsyncClient} associated with this blob. * * @return A {@link PageBlobAsyncClient} associated with this blob. */ public PageBlobAsyncClient getPageBlobAsyncClient() { if (pageBlobAsyncClient == null) { pageBlobAsyncClient = prepareBuilder().buildPageBlobAsyncClient(); } return pageBlobAsyncClient; } private SpecializedBlobClientBuilder prepareBuilder() { SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl()) .snapshot(getSnapshotId()) .serviceVersion(getServiceVersion()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } if (encryptionScope != null) { builder.encryptionScope(encryptionScope.getEncryptionScope()); } return builder; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * boolean overwrite = false; & * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should the blob already exist. 
* @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<Void> overwriteCheck; BlobRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(data, parallelTransferOptions, null, null, null, requestConditions)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(BinaryData data) { try { return upload(data, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * boolean overwrite = false; & * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. * @param overwrite Whether or not to overwrite, should the blob already exist. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(BinaryData data, boolean overwrite) { try { Mono<Void> overwriteCheck; BlobRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(Flux.just(data.toByteBuffer()), null, null, null, null, requestConditions)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. 
Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * * client.uploadWithResponse& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * .setProgressReceiver& * * client.uploadWithResponse& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata).setTier(tier) .setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setMaxConcurrency& * System.out.printf& * * client.uploadWithResponse& * .setParallelTransferOptions& * .setTier& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setMaxConcurrency& * System.out.printf& * * client.uploadWithResponse& * .setParallelTransferOptions& * .setTier& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * @param options {@link BlobParallelUploadOptions}. Unlike other upload methods, this method does not require that * the {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not * expected to produce the same values across subscriptions. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { /* The following is catalogue of all the places we allocate memory/copy in any upload method a justifaction for that case current as of 1/13/21. - Async buffered upload chunked upload: We used an UploadBufferPool. This will allocate memory as needed up to the configured maximum. This is necessary to support replayability on retires. Each flux to come out of the pool is a Flux.just() of up to two deep copied buffers, so it is replayable. It also allows us to optimize the upload by uploading the maximum amount per block. Finally, in the case of chunked uploading, it allows the customer to pass data without knowing the size. Note that full upload does not need a deep copy because the Flux emitted by the PayloadSizeGate in the full upload case is already replayable and the length is maintained by the gate. - Sync buffered upload: converting the input stream to a flux involves creating a buffer for each stream read. Using a new buffer per read ensures that the reads are safe and not overwriting data in buffers that were passed to the async upload but have not yet been sent. This covers both full and chunked uploads in the sync case. - BlobOutputStream: A deep copy is made of any buffer passed to write. 
While async copy does streamline our code and allow for some potential parallelization, this extra copy is necessary to ensure that customers writing to the stream in a tight loop are not overwriting data previously given to the stream before it has been sent. Taken together, these should support retries and protect against data being overwritten in all upload scenarios. One note is that there is no deep copy in the uploadFull method. This is unnecessary as explained in uploadFullOrChunked because the Flux coming out of the size gate in that case is already replayable and reusing buffers is not a common scenario for async like it is in sync (and we already buffer in sync to convert from a stream). */ try { StorageImplUtils.assertNotNull("options", options); final ParallelTransferOptions parallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); final BlobHttpHeaders headers = options.getHeaders(); final Map<String, String> metadata = options.getMetadata(); final Map<String, String> tags = options.getTags(); final AccessTier tier = options.getTier(); final BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); final boolean computeMd5 = options.isComputeMd5(); final BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); final Boolean legalHold = options.isLegalHold(); BlockBlobAsyncClient blockBlobAsyncClient = getBlockBlobAsyncClient(); Function<Flux<ByteBuffer>, Mono<Response<BlockBlobItem>>> uploadInChunksFunction = (stream) -> uploadInChunks(blockBlobAsyncClient, stream, parallelTransferOptions, headers, metadata, tags, tier, requestConditions, computeMd5, immutabilityPolicy, legalHold); BiFunction<Flux<ByteBuffer>, Long, Mono<Response<BlockBlobItem>>> uploadFullBlobFunction = (stream, length) -> uploadFullBlob(blockBlobAsyncClient, stream, length, parallelTransferOptions, headers, metadata, tags, tier, requestConditions, computeMd5, immutabilityPolicy, legalHold); Flux<ByteBuffer> data = options.getDataFlux(); if (data == null && options.getOptionalLength() == null) { int chunkSize = (int) Math.min(Constants.MAX_INPUT_STREAM_CONVERTER_BUFFER_LENGTH, parallelTransferOptions.getBlockSizeLong()); data = FluxUtil.toFluxByteBuffer(options.getDataStream(), chunkSize); } else if (data == null) { int chunkSize = (int) Math.min(Constants.MAX_INPUT_STREAM_CONVERTER_BUFFER_LENGTH, parallelTransferOptions.getBlockSizeLong()); data = Utility.convertStreamToByteBuffer( options.getDataStream(), options.getOptionalLength(), chunkSize, false); } return UploadUtils.uploadFullOrChunked(data, ModelHelper.wrapBlobOptions(parallelTransferOptions), uploadInChunksFunction, uploadFullBlobFunction); } catch (RuntimeException ex) { return monoError(logger, ex); } } private Mono<Response<BlockBlobItem>> uploadFullBlob(BlockBlobAsyncClient blockBlobAsyncClient, Flux<ByteBuffer> data, long length, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, boolean computeMd5, BlobImmutabilityPolicy immutabilityPolicy, Boolean legalHold) { /* Note that there is no need to buffer here as the flux returned by the size gate in this case is created from an iterable and is therefore replayable. 
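(Illustrative note, not from the original source: a Flux built with Flux.fromIterable(buffers) emits the same sequence to every subscriber, so a retry can safely resubscribe, whereas a Flux wrapping a live network stream is consumed by its first subscription and cannot be replayed without buffering.)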
*/ Flux<ByteBuffer> progressData = ProgressReporter.addProgressReporting(data, parallelTransferOptions.getProgressReceiver()); return UploadUtils.computeMd5(progressData, computeMd5, logger) .map(fluxMd5Wrapper -> new BlockBlobSimpleUploadOptions(fluxMd5Wrapper.getData(), length) .setHeaders(headers) .setMetadata(metadata) .setTags(tags) .setTier(tier) .setRequestConditions(requestConditions) .setContentMd5(fluxMd5Wrapper.getMd5()) .setImmutabilityPolicy(immutabilityPolicy) .setLegalHold(legalHold)) .flatMap(blockBlobAsyncClient::uploadWithResponse); } private Mono<Response<BlockBlobItem>> uploadInChunks(BlockBlobAsyncClient blockBlobAsyncClient, Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, boolean computeMd5, BlobImmutabilityPolicy immutabilityPolicy, Boolean legalHold) { AtomicLong totalProgress = new AtomicLong(); Lock progressLock = new ReentrantLock(); BufferStagingArea stagingArea = new BufferStagingArea(parallelTransferOptions.getBlockSizeLong(), BlockBlobClient.MAX_STAGE_BLOCK_BYTES_LONG); Flux<ByteBuffer> chunkedSource = UploadUtils.chunkSource(data, ModelHelper.wrapBlobOptions(parallelTransferOptions)); /* Write to the pool and upload the output. maxConcurrency = 1 when writing means only 1 BufferAggregator will be accumulating at a time. parallelTransferOptions.getMaxConcurrency() appends will be happening at once, so we guarantee buffering of only concurrency + 1 chunks at a time. */ return chunkedSource.flatMapSequential(stagingArea::write, 1, 1) .concatWith(Flux.defer(stagingArea::flush)) .flatMapSequential(bufferAggregator -> { Flux<ByteBuffer> progressData = ProgressReporter.addParallelProgressReporting( bufferAggregator.asFlux(), parallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); final String blockId = Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes(UTF_8)); return UploadUtils.computeMd5(progressData, computeMd5, logger) .flatMap(fluxMd5Wrapper -> blockBlobAsyncClient.stageBlockWithResponse(blockId, fluxMd5Wrapper.getData(), bufferAggregator.length(), fluxMd5Wrapper.getMd5(), requestConditions.getLeaseId())) .map(x -> blockId) .flux(); }, parallelTransferOptions.getMaxConcurrency(), 1) .collect(Collectors.toList()) .flatMap(ids -> blockBlobAsyncClient.commitBlockListWithResponse(new BlockBlobCommitBlockListOptions(ids) .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(tier) .setRequestConditions(requestConditions).setImmutabilityPolicy(immutabilityPolicy) .setLegalHold(legalHold))); } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite an * existing blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * client.uploadFromFile& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
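* <p>
* Illustrative reconstruction of the sample (the original snippet was lost in extraction; the path is a placeholder):
* <pre>
* boolean overwrite = true;
* client.uploadFromFile("/tmp/data.bin", overwrite)
*     .doOnError(throwable -&gt; System.err.println("Upload failed: " + throwable.getMessage()))
*     .subscribe();
* </pre>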
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * boolean overwrite = false; & * client.uploadFromFile& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether to overwrite, should the blob already exist. * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) {
    // NOTE: the body of this overload was missing from the extracted source. The following is a
    // reconstruction based on the overwrite handling used by the other overloads in this class
    // (an exists() check plus an If-None-Match "*" condition); the original implementation may differ.
    try {
        Mono<Void> overwriteCheck = Mono.empty();
        BlobRequestConditions requestConditions = null;
        if (!overwrite) {
            overwriteCheck = exists().flatMap(exists -> exists
                ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS))
                : Mono.empty());
            requestConditions = new BlobRequestConditions()
                .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        return overwriteCheck.then(uploadFromFile(filePath, null, null, null, null, requestConditions));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * client.uploadFromFile& * new ParallelTransferOptions& * headers, metadata, AccessTier.HOT, requestConditions& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. Number of parallel * transfers parameter is ignored. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * client.uploadFromFileWithResponse& * .setParallelTransferOptions& * new ParallelTransferOptions& * .setHeaders& * .setRequestConditions& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob.
* @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { StorageImplUtils.assertNotNull("options", options); Long originalBlockSize = (options.getParallelTransferOptions() == null) ? null : options.getParallelTransferOptions().getBlockSizeLong(); final ParallelTransferOptions finalParallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); try { return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> { try { BlockBlobAsyncClient blockBlobAsyncClient = getBlockBlobAsyncClient(); long fileSize = channel.size(); if (UploadUtils.shouldUploadInChunks(options.getFilePath(), finalParallelTransferOptions.getMaxSingleUploadSizeLong(), logger)) { return uploadFileChunks(fileSize, finalParallelTransferOptions, originalBlockSize, options.getHeaders(), options.getMetadata(), options.getTags(), options.getTier(), options.getRequestConditions(), channel, blockBlobAsyncClient); } else { Flux<ByteBuffer> data = FluxUtil.readFile(channel); if (finalParallelTransferOptions.getProgressReceiver() != null) { data = ProgressReporter.addProgressReporting(data, finalParallelTransferOptions.getProgressReceiver()); } return blockBlobAsyncClient.uploadWithResponse( new BlockBlobSimpleUploadOptions(data, fileSize).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()) .setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())); } } catch (IOException ex) { return Mono.error(ex); } }, channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private Mono<Response<BlockBlobItem>> uploadFileChunks( long fileSize, ParallelTransferOptions parallelTransferOptions, Long originalBlockSize, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, AsynchronousFileChannel channel, BlockBlobAsyncClient client) { final BlobRequestConditions finalRequestConditions = (requestConditions == null) ? new BlobRequestConditions() : requestConditions; AtomicLong totalProgress = new AtomicLong(); Lock progressLock = new ReentrantLock(); final SortedMap<Long, String> blockIds = new TreeMap<>(); return Flux.fromIterable(sliceFile(fileSize, originalBlockSize, parallelTransferOptions.getBlockSizeLong())) .flatMap(chunk -> { String blockId = getBlockID(); blockIds.put(chunk.getOffset(), blockId); Flux<ByteBuffer> progressData = ProgressReporter.addParallelProgressReporting( FluxUtil.readFile(channel, chunk.getOffset(), chunk.getCount()), parallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return client.stageBlockWithResponse(blockId, progressData, chunk.getCount(), null, finalRequestConditions.getLeaseId()); }, parallelTransferOptions.getMaxConcurrency()) .then(Mono.defer(() -> client.commitBlockListWithResponse( new BlockBlobCommitBlockListOptions(new ArrayList<>(blockIds.values())) .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(tier) .setRequestConditions(finalRequestConditions)))); } /** * RESERVED FOR INTERNAL USE. * * Resource Supplier for UploadFile. * * @param filePath The path for the file * @return {@code AsynchronousFileChannel} * @throws UncheckedIOException an input output exception. 
* @deprecated due to refactoring code to be in the common storage library. */ @Deprecated protected AsynchronousFileChannel uploadFileResourceSupplier(String filePath) { return UploadUtils.uploadFileResourceSupplier(filePath, logger); } private String getBlockID() { return Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)); } private List<BlobRange> sliceFile(long fileSize, Long originalBlockSize, long blockSize) { List<BlobRange> ranges = new ArrayList<>(); if (fileSize > 100 * Constants.MB && originalBlockSize == null) { blockSize = BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE; } for (long pos = 0; pos < fileSize; pos += blockSize) { long count = blockSize; if (pos + count > fileSize) { count = fileSize - pos; } ranges.add(new BlobRange(pos, count)); } return ranges; } }
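The chunked-upload path above leans on Reactor's flatMapSequential to bound memory: writes into the staging area run with concurrency 1 (one BufferAggregator accumulating at a time), staged chunks upload with the configured parallelism, and emission order is preserved for the final commitBlockList. The following is a minimal, self-contained sketch of that pattern using plain Reactor only; the chunk values and delays are stand-ins, not Azure SDK types:

import java.time.Duration;
import reactor.core.publisher.Flux;

public final class ChunkedUploadSketch {
    public static void main(String[] args) {
        int maxConcurrency = 2; // analogous to ParallelTransferOptions.getMaxConcurrency()
        Flux.range(0, 8) // stand-ins for staged chunks coming out of the staging area
            .flatMapSequential(chunk -> Flux.just("block-" + chunk)
                    .doOnSubscribe(s -> System.out.println("staging chunk " + chunk))
                    .delayElements(Duration.ofMillis(50)), // simulated stageBlock latency
                maxConcurrency, 1) // at most maxConcurrency uploads in flight, prefetch 1
            .collectList() // order preserved, like the block id list passed to commitBlockList
            .doOnNext(ids -> System.out.println("commit order: " + ids))
            .block();
    }
}

Even though up to maxConcurrency inner uploads run concurrently, flatMapSequential re-emits results in subscription order, which is why the committed block list stays aligned with the original chunk order.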
class BlobAsyncClient extends BlobAsyncClientBase { /** * The block size to use if none is specified in parallel operations. */ public static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; /** * The number of buffers to use if none is specified on the buffered upload method. */ public static final int BLOB_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * If a blob is known to be greater than 100MB, using a larger block size will trigger some server-side * optimizations. If the block size is not set and the size of the blob is known to be greater than 100MB, this * value will be used. */ public static final int BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE = 8 * Constants.MB; static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private static final ClientLogger LOGGER = new ClientLogger(BlobAsyncClient.class); private BlockBlobAsyncClient blockBlobAsyncClient; private AppendBlobAsyncClient appendBlobAsyncClient; private PageBlobAsyncClient pageBlobAsyncClient; /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. */ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey); } /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. */ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope); } /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. 
* @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. */ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, versionId); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return A {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey(), encryptionScope, getVersionId()); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code versionId} of this blob resource. * * @param versionId the identifier for a specific version of this blob, * pass {@code null} to interact with the latest blob version. * @return A {@link BlobAsyncClient} used to interact with the specific version. */ @Override public BlobAsyncClient getVersionClient(String versionId) { return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), encryptionScope, versionId); } /** * Creates a new {@link BlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. * @return a {@link BlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public BlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link BlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link BlobAsyncClient} with the specified {@code customerProvidedKey}. 
*/ @Override public BlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} associated with this blob. * * @return A {@link AppendBlobAsyncClient} associated with this blob. */ public AppendBlobAsyncClient getAppendBlobAsyncClient() { if (appendBlobAsyncClient == null) { appendBlobAsyncClient = prepareBuilder().buildAppendBlobAsyncClient(); } return appendBlobAsyncClient; } /** * Creates a new {@link BlockBlobAsyncClient} associated with this blob. * * @return A {@link BlockBlobAsyncClient} associated with this blob. */ public BlockBlobAsyncClient getBlockBlobAsyncClient() { if (blockBlobAsyncClient == null) { blockBlobAsyncClient = prepareBuilder().buildBlockBlobAsyncClient(); } return blockBlobAsyncClient; } /** * Creates a new {@link PageBlobAsyncClient} associated with this blob. * * @return A {@link PageBlobAsyncClient} associated with this blob. */ public PageBlobAsyncClient getPageBlobAsyncClient() { if (pageBlobAsyncClient == null) { pageBlobAsyncClient = prepareBuilder().buildPageBlobAsyncClient(); } return pageBlobAsyncClient; } private SpecializedBlobClientBuilder prepareBuilder() { SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl()) .snapshot(getSnapshotId()) .serviceVersion(getServiceVersion()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } if (encryptionScope != null) { builder.encryptionScope(encryptionScope.getEncryptionScope()); } return builder; } /** * Creates a new block blob. By default, this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { return upload(data, parallelTransferOptions, false); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * boolean overwrite = false; & * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether to overwrite, should the blob already exist. * @return A reactive response containing the information of the uploaded block blob. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { Mono<Void> overwriteCheck; BlobRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(data, parallelTransferOptions, null, null, null, requestConditions)) .flatMap(FluxUtil::toMono); } /** * Creates a new block blob. By default, this method will not overwrite an existing blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(BinaryData data) { return upload(data, false); } /** * Creates a new block blob, or updates the content of an existing block blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * boolean overwrite = false; & * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. * @param overwrite Whether to overwrite, should the blob already exist. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(BinaryData data, boolean overwrite) { Mono<Void> overwriteCheck; BlobRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(data.toFluxByteBuffer(), null, null, null, null, requestConditions)) .flatMap(FluxUtil::toMono); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. 
This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * * client.uploadWithResponse& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * .setProgressReceiver& * * client.uploadWithResponse& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { try { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
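* <p>
* Illustrative reconstruction of a progress-reporting setup (the original embedded snippet was lost in
* extraction; the names and values are placeholders):
* <pre>
* ParallelTransferOptions pto = new ParallelTransferOptions()
*     .setBlockSizeLong(10L * 1024 * 1024)
*     .setMaxConcurrency(4)
*     .setProgressReceiver(bytesTransferred -&gt;
*         System.out.printf("Uploaded %d bytes%n", bytesTransferred));
* </pre>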
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setMaxConcurrency& * System.out.printf& * * client.uploadWithResponse& * .setParallelTransferOptions& * .setTier& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setMaxConcurrency& * System.out.printf& * * client.uploadWithResponse& * .setParallelTransferOptions& * .setTier& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * @param options {@link BlobParallelUploadOptions}. Unlike other upload methods, this method does not require that * the {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not * expected to produce the same values across subscriptions. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { /* The following is a catalogue of all the places we allocate memory/copy in any upload method and a justification for each case, current as of 1/13/21. - Async buffered upload chunked upload: We used an UploadBufferPool. This will allocate memory as needed up to the configured maximum. This is necessary to support replayability on retries. Each flux to come out of the pool is a Flux.just() of up to two deep copied buffers, so it is replayable. It also allows us to optimize the upload by uploading the maximum amount per block. Finally, in the case of chunked uploading, it allows the customer to pass data without knowing the size. Note that full upload does not need a deep copy because the Flux emitted by the PayloadSizeGate in the full upload case is already replayable and the length is maintained by the gate. - Sync buffered upload: converting the input stream to a flux involves creating a buffer for each stream read. Using a new buffer per read ensures that the reads are safe and not overwriting data in buffers that were passed to the async upload but have not yet been sent. This covers both full and chunked uploads in the sync case. - BlobOutputStream: A deep copy is made of any buffer passed to write.
While async copy does streamline our code and allow for some potential parallelization, this extra copy is necessary to ensure that customers writing to the stream in a tight loop are not overwriting data previously given to the stream before it has been sent. Taken together, these should support retries and protect against data being overwritten in all upload scenarios. One note is that there is no deep copy in the uploadFull method. This is unnecessary as explained in uploadFullOrChunked because the Flux coming out of the size gate in that case is already replayable and reusing buffers is not a common scenario for async like it is in sync (and we already buffer in sync to convert from a stream). */ try { StorageImplUtils.assertNotNull("options", options); final ParallelTransferOptions parallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); final BlobHttpHeaders headers = options.getHeaders(); final Map<String, String> metadata = options.getMetadata(); final Map<String, String> tags = options.getTags(); final AccessTier tier = options.getTier(); final BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); final boolean computeMd5 = options.isComputeMd5(); final BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); final Boolean legalHold = options.isLegalHold(); BlockBlobAsyncClient blockBlobAsyncClient = getBlockBlobAsyncClient(); Function<Flux<ByteBuffer>, Mono<Response<BlockBlobItem>>> uploadInChunksFunction = (stream) -> uploadInChunks(blockBlobAsyncClient, stream, parallelTransferOptions, headers, metadata, tags, tier, requestConditions, computeMd5, immutabilityPolicy, legalHold); BiFunction<Flux<ByteBuffer>, Long, Mono<Response<BlockBlobItem>>> uploadFullBlobFunction = (stream, length) -> uploadFullBlob(blockBlobAsyncClient, stream, length, parallelTransferOptions, headers, metadata, tags, tier, requestConditions, computeMd5, immutabilityPolicy, legalHold); Flux<ByteBuffer> data = options.getDataFlux(); if (data == null && options.getOptionalLength() == null) { int chunkSize = (int) Math.min(Constants.MAX_INPUT_STREAM_CONVERTER_BUFFER_LENGTH, parallelTransferOptions.getBlockSizeLong()); data = FluxUtil.toFluxByteBuffer(options.getDataStream(), chunkSize); } else if (data == null) { int chunkSize = (int) Math.min(Constants.MAX_INPUT_STREAM_CONVERTER_BUFFER_LENGTH, parallelTransferOptions.getBlockSizeLong()); data = Utility.convertStreamToByteBuffer( options.getDataStream(), options.getOptionalLength(), chunkSize, false); } return UploadUtils.uploadFullOrChunked(data, ModelHelper.wrapBlobOptions(parallelTransferOptions), uploadInChunksFunction, uploadFullBlobFunction); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } private Mono<Response<BlockBlobItem>> uploadFullBlob(BlockBlobAsyncClient blockBlobAsyncClient, Flux<ByteBuffer> data, long length, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, boolean computeMd5, BlobImmutabilityPolicy immutabilityPolicy, Boolean legalHold) { /* Note that there is no need to buffer here as the flux returned by the size gate in this case is created from an iterable and is therefore replayable. 
*/ Flux<ByteBuffer> progressData = ProgressReporter.addProgressReporting(data, parallelTransferOptions.getProgressReceiver()); return UploadUtils.computeMd5(progressData, computeMd5, LOGGER) .map(fluxMd5Wrapper -> new BlockBlobSimpleUploadOptions(fluxMd5Wrapper.getData(), length) .setHeaders(headers) .setMetadata(metadata) .setTags(tags) .setTier(tier) .setRequestConditions(requestConditions) .setContentMd5(fluxMd5Wrapper.getMd5()) .setImmutabilityPolicy(immutabilityPolicy) .setLegalHold(legalHold)) .flatMap(blockBlobAsyncClient::uploadWithResponse); } private Mono<Response<BlockBlobItem>> uploadInChunks(BlockBlobAsyncClient blockBlobAsyncClient, Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, boolean computeMd5, BlobImmutabilityPolicy immutabilityPolicy, Boolean legalHold) { AtomicLong totalProgress = new AtomicLong(); Lock progressLock = new ReentrantLock(); BufferStagingArea stagingArea = new BufferStagingArea(parallelTransferOptions.getBlockSizeLong(), BlockBlobClient.MAX_STAGE_BLOCK_BYTES_LONG); Flux<ByteBuffer> chunkedSource = UploadUtils.chunkSource(data, ModelHelper.wrapBlobOptions(parallelTransferOptions)); /* Write to the pool and upload the output. maxConcurrency = 1 when writing means only 1 BufferAggregator will be accumulating at a time. parallelTransferOptions.getMaxConcurrency() appends will be happening at once, so we guarantee buffering of only concurrency + 1 chunks at a time. */ return chunkedSource.flatMapSequential(stagingArea::write, 1, 1) .concatWith(Flux.defer(stagingArea::flush)) .flatMapSequential(bufferAggregator -> { Flux<ByteBuffer> progressData = ProgressReporter.addParallelProgressReporting( bufferAggregator.asFlux(), parallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); final String blockId = Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes(UTF_8)); return UploadUtils.computeMd5(progressData, computeMd5, LOGGER) .flatMap(fluxMd5Wrapper -> blockBlobAsyncClient.stageBlockWithResponse(blockId, fluxMd5Wrapper.getData(), bufferAggregator.length(), fluxMd5Wrapper.getMd5(), requestConditions.getLeaseId())) .map(x -> blockId) .flux(); }, parallelTransferOptions.getMaxConcurrency(), 1) .collect(Collectors.toList()) .flatMap(ids -> blockBlobAsyncClient.commitBlockListWithResponse(new BlockBlobCommitBlockListOptions(ids) .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(tier) .setRequestConditions(requestConditions).setImmutabilityPolicy(immutabilityPolicy) .setLegalHold(legalHold))); } /** * Creates a new block blob with the content of the specified file. By default, this method will not overwrite an * existing blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * client.uploadFromFile& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath) { return uploadFromFile(filePath, false); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * boolean overwrite = false; & * client.uploadFromFile& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether to overwrite, should the blob already exist. * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> uploadFromFile(String filePath, boolean overwrite) {
    // NOTE: the body of this overload was missing from the extracted source. The following is a
    // reconstruction based on the overwrite handling used by the other overloads in this class
    // (an exists() check plus an If-None-Match "*" condition); the original implementation may differ.
    try {
        Mono<Void> overwriteCheck = Mono.empty();
        BlobRequestConditions requestConditions = null;
        if (!overwrite) {
            overwriteCheck = exists().flatMap(exists -> exists
                ? monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS))
                : Mono.empty());
            requestConditions = new BlobRequestConditions()
                .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        return overwriteCheck.then(uploadFromFile(filePath, null, null, null, null, requestConditions));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * client.uploadFromFile& * new ParallelTransferOptions& * headers, metadata, AccessTier.HOT, requestConditions& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. Number of parallel * transfers parameter is ignored. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { try { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file.
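* <p>
* Illustrative reconstruction of the sample (the original snippet was lost in extraction; the path and values
* are placeholders):
* <pre>
* client.uploadFromFileWithResponse(new BlobUploadFromFileOptions("/tmp/data.bin")
*         .setTier(AccessTier.HOT))
*     .subscribe(response -&gt; System.out.println("Upload status: " + response.getStatusCode()));
* </pre>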
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * client.uploadFromFileWithResponse& * .setParallelTransferOptions& * new ParallelTransferOptions& * .setHeaders& * .setRequestConditions& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { StorageImplUtils.assertNotNull("options", options); Long originalBlockSize = (options.getParallelTransferOptions() == null) ? null : options.getParallelTransferOptions().getBlockSizeLong(); final ParallelTransferOptions finalParallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); try { return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), LOGGER), channel -> { try { BlockBlobAsyncClient blockBlobAsyncClient = getBlockBlobAsyncClient(); long fileSize = channel.size(); if (UploadUtils.shouldUploadInChunks(options.getFilePath(), finalParallelTransferOptions.getMaxSingleUploadSizeLong(), LOGGER)) { return uploadFileChunks(fileSize, finalParallelTransferOptions, originalBlockSize, options.getHeaders(), options.getMetadata(), options.getTags(), options.getTier(), options.getRequestConditions(), channel, blockBlobAsyncClient); } else { Flux<ByteBuffer> data = FluxUtil.readFile(channel); if (finalParallelTransferOptions.getProgressReceiver() != null) { data = ProgressReporter.addProgressReporting(data, finalParallelTransferOptions.getProgressReceiver()); } return blockBlobAsyncClient.uploadWithResponse( new BlockBlobSimpleUploadOptions(data, fileSize).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()) .setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())); } } catch (IOException ex) { return Mono.error(ex); } }, channel -> UploadUtils.uploadFileCleanup(channel, LOGGER)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } private Mono<Response<BlockBlobItem>> uploadFileChunks( long fileSize, ParallelTransferOptions parallelTransferOptions, Long originalBlockSize, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, AsynchronousFileChannel channel, BlockBlobAsyncClient client) { final BlobRequestConditions finalRequestConditions = (requestConditions == null) ? 
new BlobRequestConditions() : requestConditions; AtomicLong totalProgress = new AtomicLong(); Lock progressLock = new ReentrantLock(); final SortedMap<Long, String> blockIds = new TreeMap<>(); return Flux.fromIterable(sliceFile(fileSize, originalBlockSize, parallelTransferOptions.getBlockSizeLong())) .flatMap(chunk -> { String blockId = getBlockID(); blockIds.put(chunk.getOffset(), blockId); Flux<ByteBuffer> progressData = ProgressReporter.addParallelProgressReporting( FluxUtil.readFile(channel, chunk.getOffset(), chunk.getCount()), parallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return client.stageBlockWithResponse(blockId, progressData, chunk.getCount(), null, finalRequestConditions.getLeaseId()); }, parallelTransferOptions.getMaxConcurrency()) .then(Mono.defer(() -> client.commitBlockListWithResponse( new BlockBlobCommitBlockListOptions(new ArrayList<>(blockIds.values())) .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(tier) .setRequestConditions(finalRequestConditions)))); } /** * RESERVED FOR INTERNAL USE. * * Resource Supplier for UploadFile. * * @param filePath The path for the file * @return {@code AsynchronousFileChannel} * @throws UncheckedIOException an input output exception. * @deprecated due to refactoring code to be in the common storage library. */ @Deprecated protected AsynchronousFileChannel uploadFileResourceSupplier(String filePath) { return UploadUtils.uploadFileResourceSupplier(filePath, LOGGER); } private String getBlockID() { return Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)); } private List<BlobRange> sliceFile(long fileSize, Long originalBlockSize, long blockSize) { List<BlobRange> ranges = new ArrayList<>(); if (fileSize > 100 * Constants.MB && originalBlockSize == null) { blockSize = BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE; } for (long pos = 0; pos < fileSize; pos += blockSize) { long count = blockSize; if (pos + count > fileSize) { count = fileSize - pos; } ranges.add(new BlobRange(pos, count)); } return ranges; } }
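As a worked example of the sliceFile arithmetic above: with a 4 MB block size, a 10 MB file yields the ranges [0, 4 MB), [4 MB, 8 MB), and [8 MB, 10 MB); a file over 100 MB with no caller-specified block size is instead sliced at the 8 MB high-throughput block size (BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE). Below is a standalone sketch of the same loop using local stand-in types rather than the Azure SDK's BlobRange:

import java.util.ArrayList;
import java.util.List;

public final class SliceFileSketch {
    private static final long MB = 1024L * 1024L;

    // Mirrors the slicing loop in sliceFile: fixed-size ranges, with a short final range.
    static List<long[]> slice(long fileSize, long blockSize) {
        List<long[]> ranges = new ArrayList<>(); // each entry is {offset, count}
        for (long pos = 0; pos < fileSize; pos += blockSize) {
            long count = Math.min(blockSize, fileSize - pos);
            ranges.add(new long[] {pos, count});
        }
        return ranges;
    }

    public static void main(String[] args) {
        // Prints one {offset, count} pair per line:
        // [0, 4194304], [4194304, 4194304], [8388608, 2097152]
        for (long[] r : slice(10 * MB, 4 * MB)) {
            System.out.printf("[%d, %d]%n", r[0], r[1]);
        }
    }
}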