| Column | Type | String length (min–max) |
|---|---|---|
| comment | string | 1 – 5.49k |
| method_body | string | 27 – 75.2k |
| target_code | string | 0 – 5.16k |
| method_body_after | string | 27 – 76k |
| context_before | string | 8 – 252k |
| context_after | string | 8 – 253k |
This `accept` is now before `request.setBody(validateLength(request));`, will it be a problem?
public Object invoke(Object proxy, final Method method, Object[] args) {
    validateResumeOperationIsNotPresent(method);

    try {
        final SwaggerMethodParser methodParser = getMethodParser(method);
        final HttpRequest request = createHttpRequest(methodParser, args);

        Context context = methodParser.setContext(args);
        RequestOptions options = methodParser.setRequestOptions(args);
        if (options != null) {
            options.getRequestCallback().accept(request);

            Context optionsContext = options.getContext();
            if (optionsContext != null && optionsContext != Context.NONE) {
                for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) {
                    context = context.addData(kvp.getKey(), kvp.getValue());
                }
            }
        }

        context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
            .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
        context = startTracingSpan(method, context);

        if (request.getBody() != null) {
            request.setBody(validateLength(request));
        }

        final Mono<HttpResponse> asyncResponse = send(request, context);
        Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);

        return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context,
            options);
    } catch (IOException e) {
        throw logger.logExceptionAsError(Exceptions.propagate(e));
    }
}
options.getRequestCallback().accept(request);
public Object invoke(Object proxy, final Method method, Object[] args) {
    validateResumeOperationIsNotPresent(method);

    try {
        final SwaggerMethodParser methodParser = getMethodParser(method);
        final HttpRequest request = createHttpRequest(methodParser, args);

        Context context = methodParser.setContext(args);
        RequestOptions options = methodParser.setRequestOptions(args);
        context = mergeRequestOptionsContext(context, options);

        context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
            .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
        context = startTracingSpan(method, context);

        if (request.getBody() != null) {
            request.setBody(validateLength(request));
        }

        if (options != null) {
            options.getRequestCallback().accept(request);
        }

        final Mono<HttpResponse> asyncResponse = send(request, context);
        Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);

        return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context,
            options);
    } catch (IOException e) {
        throw logger.logExceptionAsError(Exceptions.propagate(e));
    }
}
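The reviewer's question concerns the relative order of the `RequestOptions` request callback and the `validateLength` wrapping. The practical difference: when the callback runs before `request.setBody(validateLength(request))` (the original method body), any body the callback sets is still length-checked against the `Content-Length` header; when it runs after (the updated body), a body set by the callback replaces the wrapped `Flux` and is never length-checked. A minimal sketch of a body-replacing callback, assuming azure-core's fluent `addRequestCallback` setter on `RequestOptions`; the class and method names below are illustrative only and not part of this diff:

```java
import com.azure.core.http.HttpRequest;
import com.azure.core.http.rest.RequestOptions;

final class CallbackOrderingSketch {
    // Hypothetical builder for illustration. With the reordered invoke(), this
    // callback runs after validateLength(request) has wrapped the original body,
    // so the replacement body below bypasses the Content-Length check; a callback
    // that only adjusts headers or the URL is unaffected by the reordering.
    static RequestOptions bodyReplacingOptions() {
        return new RequestOptions()
            .addRequestCallback((HttpRequest request) -> request
                .setHeader("Content-Type", "application/json") // header tweak: unaffected
                .setBody("{}"));                               // replaces the validated Flux
    }
}
```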
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && !options.isThrowOnError())) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Context mergeRequestOptionsContext(Context context, RequestOptions options) { if (options == null) { return context; } Context optionsContext = options.getContext(); if (optionsContext != null && optionsContext != Context.NONE) { for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) { context = context.addData(kvp.getKey(), kvp.getValue()); } } return context; } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && !options.isThrowOnError())) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
can you rephrase this?
public Object invoke(Object proxy, final Method method, Object[] args) {
    validateResumeOperationIsNotPresent(method);

    try {
        final SwaggerMethodParser methodParser = getMethodParser(method);
        final HttpRequest request = createHttpRequest(methodParser, args);

        Context context = methodParser.setContext(args);
        RequestOptions options = methodParser.setRequestOptions(args);
        if (options != null) {
            Context optionsContext = options.getContext();
            if (optionsContext != null && optionsContext != Context.NONE) {
                for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) {
                    context = context.addData(kvp.getKey(), kvp.getValue());
                }
            }
        }

        context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
            .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
        context = startTracingSpan(method, context);

        if (request.getBody() != null) {
            request.setBody(validateLength(request));
        }

        if (options != null) {
            options.getRequestCallback().accept(request);
        }

        final Mono<HttpResponse> asyncResponse = send(request, context);
        Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);

        return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context,
            options);
    } catch (IOException e) {
        throw logger.logExceptionAsError(Exceptions.propagate(e));
    }
}
public Object invoke(Object proxy, final Method method, Object[] args) {
    validateResumeOperationIsNotPresent(method);

    try {
        final SwaggerMethodParser methodParser = getMethodParser(method);
        final HttpRequest request = createHttpRequest(methodParser, args);

        Context context = methodParser.setContext(args);
        RequestOptions options = methodParser.setRequestOptions(args);
        context = mergeRequestOptionsContext(context, options);

        context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName())
            .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType()));
        context = startTracingSpan(method, context);

        if (request.getBody() != null) {
            request.setBody(validateLength(request));
        }

        if (options != null) {
            options.getRequestCallback().accept(request);
        }

        final Mono<HttpResponse> asyncResponse = send(request, context);
        Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);

        return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context,
            options);
    } catch (IOException e) {
        throw logger.logExceptionAsError(Exceptions.propagate(e));
    }
}
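In this row the inline null/`Context.NONE` checks and the copy loop are lifted into the static `mergeRequestOptionsContext(Context, RequestOptions)` helper shown in the surrounding class. The copy step relies on `Context` being immutable, so each `addData` call returns a new instance that must be reassigned. A minimal sketch of that copy step, using only the `Context` APIs already used in this class; `copyAll` is an illustrative name, not part of the change:

```java
import com.azure.core.util.Context;
import java.util.Map;

final class ContextMergeSketch {
    // Copies every key/value from 'source' onto 'target'. Context is immutable,
    // so the result of each addData call is reassigned and the final Context is
    // returned to the caller (the reason invoke() writes "context = ...").
    static Context copyAll(Context target, Context source) {
        for (Map.Entry<Object, Object> kvp : source.getValues().entrySet()) {
            target = target.addData(kvp.getKey(), kvp.getValue());
        }
        return target;
    }
}
```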
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && !options.isThrowOnError())) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
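The class snapshot above closes with the RestProxy.create factory overloads. For orientation, here is a minimal, hedged sketch of how such a proxy is typically obtained; the ExampleService interface, its host, and its path are invented for illustration, while RestProxy.create, HttpPipelineBuilder, RetryPolicy, and the @Host/@ServiceInterface/@Get annotations are the azure-core types referenced in the surrounding code.

@Host("https://{endpoint}")
@ServiceInterface(name = "ExampleService")
interface ExampleService {
    // Hypothetical operation, used only to show how the proxy is created below.
    @Get("items/{id}")
    Mono<Response<String>> getItem(@HostParam("endpoint") String endpoint, @PathParam("id") String id);
}

// RestProxy.create parses the interface annotations (SwaggerInterfaceParser) and returns
// a dynamic proxy that sends each call through the supplied HttpPipeline.
HttpPipeline pipeline = new HttpPipelineBuilder().policies(new RetryPolicy()).build();
ExampleService service = RestProxy.create(ExampleService.class, pipeline);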
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Context mergeRequestOptionsContext(Context context, RequestOptions options) { if (options == null) { return context; } Context optionsContext = options.getContext(); if (optionsContext != null && optionsContext != Context.NONE) { for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) { context = context.addData(kvp.getKey(), kvp.getValue()); } } return context; } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && !options.isThrowOnError())) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
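The updated RestProxy snapshot above introduces a static mergeRequestOptionsContext helper that copies any Context attached to a RequestOptions onto the per-call Context. Below is a small standalone sketch of that merge step, restricted to the Context API already visible in the code; the keys and values are made up for illustration, and the imports (com.azure.core.util.Context, java.util.Map) are assumed.

// Sample data: one entry on the call context, one on the RequestOptions context.
Context context = new Context("caller-method", "ExampleService.getItem");
Context optionsContext = new Context("custom-key", "custom-value");

// The same key-by-key copy performed inside mergeRequestOptionsContext.
if (optionsContext != null && optionsContext != Context.NONE) {
    for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) {
        context = context.addData(kvp.getKey(), kvp.getValue());
    }
}
// The resulting context now carries both "caller-method" and "custom-key".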
Should there be a tracking issue referenced in the comment?
public Object invoke(Object proxy, final Method method, Object[] args) { validateResumeOperationIsNotPresent(method); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); if (options != null) { Context optionsContext = options.getContext(); if (optionsContext != null && optionsContext != Context.NONE) { for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) { context = context.addData(kvp.getKey(), kvp.getValue()); } } } context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (request.getBody() != null) { request.setBody(validateLength(request)); } if (options != null) { options.getRequestCallback().accept(request); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }
public Object invoke(Object proxy, final Method method, Object[] args) { validateResumeOperationIsNotPresent(method); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); context = mergeRequestOptionsContext(context, options); context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (request.getBody() != null) { request.setBody(validateLength(request)); } if (options != null) { options.getRequestCallback().accept(request); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }
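In the revised invoke shown above, the RequestOptions request callback is applied only after the body has been wrapped by validateLength, so the callback still gets the final say on the outgoing request's headers before it is sent. A hedged caller-side sketch follows; the header name and value are invented, while addRequestCallback and setHeader are the azure-core RequestOptions/HttpRequest methods behind the getRequestCallback().accept(request) call in the code.

// Hypothetical options passed by a caller: the callback runs once per request, after
// the Content-Length validation wrapper has been installed and before the send.
RequestOptions options = new RequestOptions()
    .addRequestCallback(request -> request.setHeader("x-ms-example-header", "example-value"));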
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && !options.isThrowOnError())) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
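Both class snapshots in this row carry the static validateLength wrapper, which compares the bytes actually emitted by the request body against the declared Content-Length header and fails the Flux on a mismatch. The sketch below exercises the "body too small" path; it assumes a test living in the same package as RestProxy (validateLength is package-private) and uses reactor-test's StepVerifier, which is an assumption rather than anything shown in the row.

// Declares 5 bytes via Content-Length but emits only 3, so the wrapped Flux is expected
// to terminate with UnexpectedLengthException (the BODY_TOO_SMALL case).
HttpRequest request = new HttpRequest(HttpMethod.PUT, "https://example.invalid/upload")
    .setHeader("Content-Length", "5")
    .setBody(Flux.just(ByteBuffer.wrap(new byte[] {1, 2, 3})));

StepVerifier.create(RestProxy.validateLength(request))
    .expectError(UnexpectedLengthException.class)
    .verify();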
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Context mergeRequestOptionsContext(Context context, RequestOptions options) { if (options == null) { return context; } Context optionsContext = options.getContext(); if (optionsContext != null && optionsContext != Context.NONE) { for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) { context = context.addData(kvp.getKey(), kvp.getValue()); } } return context; } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && !options.isThrowOnError())) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
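The validateLength helper shown in these records compares the bytes a request body actually emits against the Content-Length header and fails the stream with an UnexpectedLengthException when they disagree. A minimal sketch of exercising that behavior follows; it is an assumption rather than an existing test: it relies on a class placed in the com.azure.core.http.rest package purely to reach the package-private helper, and on reactor-test being available. The 3-byte body and the declared length of 5 are arbitrary illustration values.

// Hedged sketch: placed in the RestProxy package only to reach the package-private validateLength.
package com.azure.core.http.rest;

import java.nio.ByteBuffer;

import com.azure.core.exception.UnexpectedLengthException;
import com.azure.core.http.HttpMethod;
import com.azure.core.http.HttpRequest;

import reactor.core.publisher.Flux;
import reactor.test.StepVerifier;

public class ValidateLengthSketch {
    public static void main(String[] args) {
        // Declare a 5-byte Content-Length but emit only 3 bytes.
        HttpRequest request = new HttpRequest(HttpMethod.PUT, "https://example.com")
            .setHeader("Content-Length", "5")
            .setBody(Flux.just(ByteBuffer.wrap(new byte[3])));

        // validateLength passes the 3-byte buffer through, then errors once the trailing
        // validation buffer finds fewer bytes than the header promised.
        StepVerifier.create(RestProxy.validateLength(request))
            .expectNextCount(1)
            .verifyError(UnexpectedLengthException.class);
    }
}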
Should we add a perf test for this? /cc @g2vinay
public Object invoke(Object proxy, final Method method, Object[] args) { validateResumeOperationIsNotPresent(method); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); if (options != null) { Context optionsContext = options.getContext(); if (optionsContext != null && optionsContext != Context.NONE) { for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) { context = context.addData(kvp.getKey(), kvp.getValue()); } } } context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (request.getBody() != null) { request.setBody(validateLength(request)); } if (options != null) { options.getRequestCallback().accept(request); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }
if (optionsContext != null && optionsContext != Context.NONE) {
public Object invoke(Object proxy, final Method method, Object[] args) { validateResumeOperationIsNotPresent(method); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); context = mergeRequestOptionsContext(context, options); context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (request.getBody() != null) { request.setBody(validateLength(request)); } if (options != null) { options.getRequestCallback().accept(request); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }
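The perf-test question above could be answered with a small microbenchmark around the per-request work this change touches. The sketch below is an assumption, not something taken from the repo: it uses JMH (the project may prefer its own perf framework), the class name MergeContextBenchmark is invented, and it measures a local copy of the merge loop from mergeRequestOptionsContext so that no package-private member has to be reached. Only Context methods that appear in the diff (the key/value constructor, addData, getValues) are used.

import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;

import com.azure.core.util.Context;

@State(Scope.Thread)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public class MergeContextBenchmark {
    private Context base;
    private Context optionsContext;

    @Setup
    public void setup() {
        base = new Context("caller-method", "com.example.Service.get");
        // Stand-in for the Context carried by RequestOptions, holding a few entries.
        optionsContext = new Context("k1", "v1").addData("k2", "v2").addData("k3", "v3");
    }

    @Benchmark
    public Context mergeLoop() {
        // Local copy of the loop performed by RestProxy.mergeRequestOptionsContext.
        Context merged = base;
        for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) {
            merged = merged.addData(kvp.getKey(), kvp.getValue());
        }
        return merged;
    }
}

Comparing this against a baseline that skips the merge (the null RequestOptions path) would show whether the new branch is worth a dedicated perf test.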
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && !options.isThrowOnError())) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Context mergeRequestOptionsContext(Context context, RequestOptions options) { if (options == null) { return context; } Context optionsContext = options.getContext(); if (optionsContext != null && optionsContext != Context.NONE) { for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) { context = context.addData(kvp.getKey(), kvp.getValue()); } } return context; } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && !options.isThrowOnError())) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
There is a lot of duplicated code here. Can we make this into a method?
Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(queue.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); queue.setForwardTo(String.format("https: queue.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(queue.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); queue.setForwardDeadLetteredMessagesTo(String.format("https: queue.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } }
final HttpHeaders supplementaryAuthHeaders = new HttpHeaders();
return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); }
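The duplication the reviewer points at (the forward-to and dead-letter-forward-to handling repeated across the create and update methods) could be folded into one helper along these lines. This is a hedged sketch rather than the change that was made: the name getForwardingContext and its parameter shape are invented for illustration, while the header-name constants, CoreUtils.isNullOrEmpty, addAdditionalAuthHeader, and the context keys come from the class shown in this record. Normalizing the forward addresses (the String.format call in the method body above, whose format string is truncated here) is deliberately left with the caller, and the method is meant to sit inside ServiceBusAdministrationAsyncClient next to the existing private helpers.

// Hypothetical helper for ServiceBusAdministrationAsyncClient; name and signature are illustrative.
private Context getForwardingContext(String forwardTo, String forwardDlqTo, Context context) {
    final HttpHeaders supplementaryAuthHeaders = new HttpHeaders();
    Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE);

    if (!CoreUtils.isNullOrEmpty(forwardTo)) {
        addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders);
    }
    if (!CoreUtils.isNullOrEmpty(forwardDlqTo)) {
        addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders);
    }
    if (supplementaryAuthHeaders.getSize() != 0) {
        additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders);
    }
    return additionalContext;
}

A caller such as updateQueueWithResponse would then reduce to normalizing the two forward addresses on the QueueProperties and calling getForwardingContext(queue.getForwardTo(), queue.getForwardDeadLetteredMessagesTo(), context) before building the put request.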
class ServiceBusAdministrationAsyncClient { private static final String SERVICE_BUS_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private static final String SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME = "ServiceBusSupplementaryAuthorization"; private static final String SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME = "ServiceBusDlqSupplementaryAuthorization"; private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; private final TokenCredential tokenCredential; /** * Creates a new instance with the given management client and serializer. * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * @param credential Credential to get additional tokens if necessary */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer, TokenCredential credential) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); this.tokenCredential = credential; } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. 
* @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name.
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are null.
* @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}.
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. 
* @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. 
* * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. 
* * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) { return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue()); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) { return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity())); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. 
* @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(createQueueOptions.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); createQueueOptions.setForwardTo(String.format("https: createQueueOptions.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(createQueueOptions.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(String.format("https: createQueueOptions.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(subscriptionOptions.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); subscriptionOptions.setForwardTo(String.format("https: subscriptionOptions.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(subscriptionOptions.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(String.format("https: subscriptionOptions.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (topicOptions == null) { return monoError(logger, new NullPointerException("'topicOptions' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(topic); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param queueName Name of queue to delete. * @param context Context to pass into request. * * @return A Mono that completes when the queue is deleted. */ Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(queueName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param topicName Name of topic to delete. * @param subscriptionName Name of the subscription for the rule. * @param ruleName Name of the rule. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. 
*/ Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a subscription with its context. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. */ Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a topic with its context. * * @param topicName Name of topic to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
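The exists-check above is a small reactive pattern worth calling out: a successful lookup is mapped to a boolean, while ResourceNotFoundException is converted into a plain false. The same idea in isolation, with lookup standing in for any of the get*WithResponse calls:
// Success -> true, ResourceNotFoundException -> false, other errors still propagate.
Mono<Boolean> exists = lookup
    .map(response -> true)
    .onErrorResume(ResourceNotFoundException.class, error -> Mono.just(false));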
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
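The Function<QueueProperties, T> mapper above lets one service call back several public getters (an identity mapping yields the plain properties). A usage sketch of the assumed public accessor; getQueue itself is not shown in this excerpt.
// Hypothetical public getter expected to delegate to getQueueWithResponse with an identity mapper.
client.getQueue("orders")
    .subscribe(
        queue -> System.out.println("Found queue: " + queue.getName()),
        error -> System.err.println("Lookup failed: " + error));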
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
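These first-page and next-page helpers are the two halves a PagedFlux needs, with the continuation token carrying the number of items already returned. A sketch of how they are typically stitched together; the real public listQueues() wiring is outside this excerpt.
// Assumes the azure-core PagedFlux(Supplier, Function) constructor.
PagedFlux<QueueProperties> queues = new PagedFlux<>(
    () -> listQueuesFirstPage(Context.NONE),
    continuationToken -> listQueuesNextPage(continuationToken, Context.NONE));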
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(queue.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); queue.setForwardTo(String.format("https: queue.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(queue.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); queue.setForwardDeadLetteredMessagesTo(String.format("https: queue.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}.
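The update methods expect a fully populated properties object and send it with an unconditional "*" if-match, so callers normally fetch, modify, and resubmit. A hedged sketch; getQueue and updateQueue are assumed to be the public counterparts of the package-private methods above.
// Read-modify-write sketch; properties left untouched are sent back unchanged.
client.getQueue("orders")
    .flatMap(queue -> {
        queue.setForwardTo("target-queue");
        return client.updateQueue(queue);
    })
    .subscribe(updated -> System.out.println("Updated queue: " + updated.getName()));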
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(subscription.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); subscription.setForwardTo(String.format("https: subscription.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(subscription.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); subscription.setForwardDeadLetteredMessagesTo(String.format("https: subscription.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
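extractPage recovers the continuation token by pulling the $skip parameter out of the feed's "next" link. The parsing step in isolation (checked exceptions omitted; the URL below is a made-up example of such a link):
URL next = new URL("https://example.servicebus.windows.net/$Resources/queues?$skip=100&$top=100");
String query = URLDecoder.decode(next.getQuery(), StandardCharsets.UTF_8.name());
Optional<Integer> skip = Arrays.stream(query.split("&amp;|&"))
    .map(part -> part.split("=", 2))
    .filter(parts -> parts.length == 2 && parts[0].equalsIgnoreCase("$skip"))
    .map(parts -> Integer.valueOf(parts[1]))
    .findFirst();
// skip.orElse(null) is 100 here; it becomes the page's continuation token.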
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** Adds the additional authentication headers needed for various types of forwarding options. * * @param headerName Name of the auth header */ private void addAdditionalAuthHeader(String headerName, HttpHeaders headers) { final String scope; if (tokenCredential instanceof ServiceBusSharedKeyCredential) { scope = String.format("https: } else { scope = ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE; } final Mono<AccessToken> tokenMono = tokenCredential.getToken(new TokenRequestContext().addScopes(scope)); final AccessToken token = tokenMono.block(ServiceBusConstants.OPERATION_TIMEOUT); if (headers == null || token == null) { return; } headers.add(headerName, token.getToken()); } /** * Given an XML title element, returns the XML text inside. Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". * * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. 
*/ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
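To show how the mapException translation surfaces to callers, a hedged recovery sketch: creating an entity that already exists fails with ResourceExistsException (HTTP 409). The getQueue accessor and the entity name are illustrative only.
client.createQueue("orders")
    .onErrorResume(ResourceExistsException.class, error -> {
        System.out.println("Queue already exists; fetching it instead.");
        return client.getQueue("orders");
    })
    .subscribe(queue -> System.out.println("Queue ready: " + queue.getName()));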
class ServiceBusAdministrationAsyncClient { private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
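Building the client is outside this excerpt; a construction sketch under the assumption that the surrounding library exposes a ServiceBusAdministrationClientBuilder with a connectionString(...) setter and buildAsyncClient():
ServiceBusAdministrationAsyncClient client = new ServiceBusAdministrationClientBuilder()
    .connectionString(System.getenv("SERVICEBUS_CONNECTION_STRING"))
    .buildAsyncClient();
client.createQueue("orders")
    .subscribe(queue -> System.out.println("Created: " + queue.getName()));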
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. 
 * @param subscriptionOptions Information about the subscription to create.
 *
 * @return A Mono that returns the created subscription in addition to the HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 * namespace.
 * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred
 * processing the request.
 * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
 * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions}
 * are null.
 * @throws ResourceExistsException if a subscription exists with the same topic and subscription name.
 * @see <a href="https:
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName,
    String subscriptionName, CreateSubscriptionOptions subscriptionOptions) {
    return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName,
        subscriptionOptions, context));
}

/**
 * Creates a topic with the given name.
 *
 * @param topicName Name of the topic to create.
 *
 * @return A Mono that completes with information about the created topic.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 * namespace.
 * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error
 * occurred processing the request.
 * @throws NullPointerException if {@code topicName} is null.
 * @throws IllegalArgumentException if {@code topicName} is an empty string.
 * @throws ResourceExistsException if a topic exists with the same {@code topicName}.
 * @see <a href="https:
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TopicProperties> createTopic(String topicName) {
    try {
        return createTopic(topicName, new CreateTopicOptions());
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}

/**
 * Creates a topic with the {@link CreateTopicOptions}.
 *
 * @param topicName Name of the topic to create.
 * @param topicOptions The options used to create the topic.
 *
 * @return A Mono that completes with information about the created topic.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 * namespace.
 * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error
 * occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} is an empty string.
 * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null.
 * @throws ResourceExistsException if a topic exists with the same {@code topicName}.
 * @see <a href="https:
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) {
    return createTopicWithResponse(topicName, topicOptions).map(Response::getValue);
}

/**
 * Creates a topic and returns the created topic in addition to the HTTP response.
 *
 * @param topicName Name of the topic to create.
 * @param topicOptions The options used to create the topic.
 *
 * @return A Mono that returns the created topic in addition to the HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 * namespace.
 * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error
 * occurred processing the request.
* @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. 
* * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
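 *
 * <p>A minimal usage sketch (the {@code client} variable and queue name are illustrative assumptions):</p>
 * <pre>{@code
 * // Check whether the queue exists and inspect the raw HTTP response as well as the boolean result.
 * client.getQueueExistsWithResponse("my-queue")
 *     .subscribe(response -> System.out.println("Status: " + response.getStatusCode()
 *         + ", exists: " + response.getValue()));
 * }</pre>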
* @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. 
 * @param ruleName The name of the rule to retrieve.
 *
 * @return The associated rule.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) {
    return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue());
}

/**
 * Gets a rule from the service namespace.
 *
 * Only the following data types are deserialized in Filters and Action parameters - string, int, long, boolean,
 * double, and OffsetDateTime. Other data types are returned as their string values.
 *
 * @param topicName The name of the topic relative to service bus namespace.
 * @param subscriptionName The subscription name the rule belongs to.
 * @param ruleName The name of the rule to retrieve.
 *
 * @return The associated rule with the corresponding HTTP response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName,
    String ruleName) {
    return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context));
}

/**
 * Gets information about the subscription.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of subscription to get information about.
 *
 * @return A Mono that completes with information about the subscription.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 * namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
 * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
 * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}.
 * @see <a href="https:
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) {
    return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue);
}

/**
 * Gets information about the subscription along with its HTTP response.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of subscription to get information about.
 *
 * @return A Mono that completes with information about the subscription and the associated HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 * namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
 * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
 * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist.
 * @see <a href="https:
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName,
    String subscriptionName) {
    return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context,
        Function.identity()));
}

/**
 * Gets whether or not a subscription within a topic exists.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of the subscription.
 *
 * @return A Mono that completes indicating whether or not the subscription exists.
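 *
 * <p>A minimal usage sketch (the {@code client} variable and entity names are illustrative assumptions):</p>
 * <pre>{@code
 * // Check whether the subscription exists before attempting to create it.
 * client.getSubscriptionExists("my-topic", "my-subscription")
 *     .subscribe(exists -> System.out.println("Subscription exists: " + exists));
 * }</pre>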
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. 
* * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
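 *
 * <p>A minimal usage sketch (the {@code client} variable and entity names are illustrative assumptions):</p>
 * <pre>{@code
 * // Stream every rule on the subscription; paging is handled by the returned PagedFlux.
 * client.listRules("my-topic", "my-subscription")
 *     .subscribe(rule -> System.out.println("Rule: " + rule.getName()));
 * }</pre>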
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
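 *
 * <p>A minimal sketch of the suggested get-then-update flow (the {@code client} variable, topic name, and the
 * specific setter used are illustrative assumptions):</p>
 * <pre>{@code
 * // Fetch the current topic, change one property, then send the fully populated description back.
 * client.getTopic("my-topic")
 *     .flatMap(topic -> {
 *         topic.setDefaultMessageTimeToLive(Duration.ofMinutes(5));
 *         return client.updateTopicWithResponse(topic);
 *     })
 *     .subscribe(response -> System.out.println("Updated topic: " + response.getValue().getName()));
 * }</pre>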
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = createQueueOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); createQueueOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = createQueueOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
 */
Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName,
    CreateSubscriptionOptions subscriptionOptions, Context context) {
    if (topicName == null) {
        return monoError(logger, new NullPointerException("'topicName' cannot be null."));
    } else if (topicName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty."));
    }
    if (subscriptionName == null) {
        return monoError(logger, new NullPointerException("'subscriptionName' cannot be null."));
    } else if (subscriptionName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty."));
    }
    if (subscriptionOptions == null) {
        return monoError(logger, new NullPointerException("'subscription' cannot be null."));
    }

    final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE)
        .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders());
    final String forwardToEntity = subscriptionOptions.getForwardTo();
    if (!CoreUtils.isNullOrEmpty(forwardToEntity)) {
        addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity,
            contextWithHeaders);
        subscriptionOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity));
    }
    final String forwardDlqToEntity = subscriptionOptions.getForwardDeadLetteredMessagesTo();
    if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) {
        addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity,
            contextWithHeaders);
        subscriptionOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity));
    }

    final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions);
    final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent()
        .setType(CONTENT_TYPE)
        .setSubscriptionDescription(subscription);
    final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content);

    try {
        return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity,
            null, contextWithHeaders)
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(response -> deserializeSubscription(topicName, response));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Creates a topic with its context.
 *
 * @param topicName Name of the topic to create.
 * @param topicOptions Topic to create.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes with the created {@link TopicProperties}.
 */
Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions,
    Context context) {
    if (topicName == null) {
        return monoError(logger, new NullPointerException("'topicName' cannot be null."));
    } else if (topicName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty."));
    }
    if (topicOptions == null) {
        return monoError(logger, new NullPointerException("'topicOptions' cannot be null"));
    } else if (context == null) {
        return monoError(logger, new NullPointerException("'context' cannot be null."));
    }

    final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions);
    final CreateTopicBodyContent content = new CreateTopicBodyContent()
        .setType(CONTENT_TYPE)
        .setTopicDescription(topic);
    final CreateTopicBody createEntity = new CreateTopicBody()
        .setContent(content);
    final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

    try {
        return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing)
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(this::deserializeTopic);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Deletes a queue with its context.
 *
 * @param queueName Name of queue to delete.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes when the queue is deleted.
 */
Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) {
    if (queueName == null) {
        return monoError(logger, new NullPointerException("'queueName' cannot be null"));
    } else if (queueName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string."));
    } else if (context == null) {
        return monoError(logger, new NullPointerException("'context' cannot be null."));
    }

    final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

    try {
        return entityClient.deleteWithResponseAsync(queueName, withTracing)
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                response.getHeaders(), null));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Deletes a rule with its context.
 *
 * @param topicName Name of the topic associated with the rule to delete.
 * @param subscriptionName Name of the subscription associated with the rule to delete.
 * @param ruleName Name of the rule to delete.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes when the rule is deleted.
 */
Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName,
    Context context) {
    if (topicName == null) {
        return monoError(logger, new NullPointerException("'topicName' cannot be null"));
    } else if (topicName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string."));
    } else if (subscriptionName == null) {
        return monoError(logger, new NullPointerException("'subscriptionName' cannot be null"));
    } else if (subscriptionName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string."));
    } else if (ruleName == null) {
        return monoError(logger, new NullPointerException("'ruleName' cannot be null"));
    } else if (ruleName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string."));
    } else if (context == null) {
        return monoError(logger, new NullPointerException("'context' cannot be null."));
    }

    final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

    try {
        return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing)
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                response.getHeaders(), null));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Deletes a subscription with its context.
 *
 * @param topicName Name of topic associated with subscription to delete.
 * @param subscriptionName Name of subscription to delete.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes when the subscription is deleted.
 */
Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) {
    if (subscriptionName == null) {
        return monoError(logger, new NullPointerException("'subscriptionName' cannot be null"));
    } else if (subscriptionName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string."));
    } else if (topicName == null) {
        return monoError(logger, new NullPointerException("'topicName' cannot be null"));
    } else if (topicName.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string."));
    } else if (context == null) {
        return monoError(logger, new NullPointerException("'context' cannot be null."));
    }

    final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

    try {
        return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName,
            withTracing)
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                response.getHeaders(), null));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Deletes a topic with its context.
 *
 * @param topicName Name of topic to delete.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes when the topic is deleted.
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = queue.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscription.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscription.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscription.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscription.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Check that the additional headers field is present and add the additional auth header * * @param headerName name of the header to be added * @param context current request context * * @return boolean representing the outcome of adding header operation */ private void addSupplementaryAuthHeader(String headerName, String entity, Context context) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY) .ifPresent(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; customHttpHeaders.add(headerName, entity); } }); } /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. 
* * @return Forward to Entity represented as an absolute URL */ private String getAbsoluteUrlFromEntity(String entity) { try { URL url = new URL(entity); return url.toString(); } catch (MalformedURLException ex) { } UrlBuilder urlBuilder = new UrlBuilder(); urlBuilder.setScheme("https"); urlBuilder.setHost(managementClient.getEndpoint()); urlBuilder.setPath(entity); try { URL url = urlBuilder.toUrl(); return url.toString(); } catch (MalformedURLException ex) { logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'", managementClient.getEndpoint(), entity); logger.logThrowableAsError(ex); } return null; } /** * Given an XML title element, returns the XML text inside. Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". * * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. */ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
Use a string constant?
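To make the "Use a string constant?" suggestion concrete, here is a minimal, hypothetical sketch. The class name, constant name, format-string shape, helper method, and example values are all assumptions (the actual literal is truncated in the captured target code); the sketch only illustrates hoisting the repeated "https:..." literal into a named constant.

// Hypothetical sketch only: constant name, format-string shape, and helper are assumptions,
// not the SDK's actual code.
public final class ForwardToUrlSketch {

    // Single named constant instead of repeating the "https:..." literal at each call site.
    private static final String FORWARD_TO_URL_FORMAT = "https://%s/%s";

    private ForwardToUrlSketch() {
    }

    // Assumed helper: formats a namespace endpoint and an entity path into an absolute forward-to URL.
    static String buildForwardToUrl(String endpoint, String entityPath) {
        return String.format(FORWARD_TO_URL_FORMAT, endpoint, entityPath);
    }

    public static void main(String[] args) {
        // Example usage with made-up values; prints "https://contoso.servicebus.windows.net/my-queue".
        System.out.println(buildForwardToUrl("contoso.servicebus.windows.net", "my-queue"));
    }
}

With a constant like this, both the setForwardTo and setForwardDeadLetteredMessagesTo call sites would format against the same literal, which appears to be the intent of the review comment.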
Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) {
    if (queue == null) {
        return monoError(logger, new NullPointerException("'queue' cannot be null"));
    } else if (context == null) {
        return monoError(logger, new NullPointerException("'context' cannot be null."));
    }

    final HttpHeaders supplementaryAuthHeaders = new HttpHeaders();
    Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE);

    if (!CoreUtils.isNullOrEmpty(queue.getForwardTo())) {
        addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders);
        queue.setForwardTo(String.format("https: queue.getForwardTo()));
    }

    if (!CoreUtils.isNullOrEmpty(queue.getForwardDeadLetteredMessagesTo())) {
        addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders);
        queue.setForwardDeadLetteredMessagesTo(String.format("https: queue.getForwardDeadLetteredMessagesTo()));
    }

    if (supplementaryAuthHeaders.getSize() != 0) {
        additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders);
    }

    final QueueDescription queueDescription = EntityHelper.toImplementation(queue);
    final CreateQueueBodyContent content = new CreateQueueBodyContent()
        .setType(CONTENT_TYPE)
        .setQueueDescription(queueDescription);
    final CreateQueueBody createEntity = new CreateQueueBody()
        .setContent(content);

    try {
        return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", additionalContext)
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(response -> deserializeQueue(response));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
queue.setForwardTo(String.format("https:
    return monoError(logger, new NullPointerException("'queue' cannot be null"));
} else if (context == null) {
    return monoError(logger, new NullPointerException("'context' cannot be null."));
}
class ServiceBusAdministrationAsyncClient { private static final String SERVICE_BUS_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private static final String SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME = "ServiceBusSupplementaryAuthorization"; private static final String SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME = "ServiceBusDlqSupplementaryAuthorization"; private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; private final TokenCredential tokenCredential; /** * Creates a new instance with the given management client and serializer. * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * @param credential Credential to get additional tokens if necessary */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer, TokenCredential credential) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); this.tokenCredential = credential; } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. 
* @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. 
* @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. 
* @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. 
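 * <p>A minimal illustrative sketch, assuming a client named {@code client} already exists; the topic name and the
 * decision to ignore a missing topic are assumptions, not prescribed behavior.</p>
 * <pre>{@code
 * client.deleteTopic("orders-topic")
 *     .onErrorResume(ResourceNotFoundException.class, error -> {
 *         // Assumed handling: treat a missing topic as already deleted.
 *         System.out.println("Topic did not exist.");
 *         return Mono.empty();
 *     })
 *     .subscribe();
 * }</pre>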
* * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. 
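 * <p>Illustrative sketch, assuming an existing {@code ServiceBusAdministrationAsyncClient} named {@code client};
 * the accessor used on {@link NamespaceProperties} is an assumption for demonstration.</p>
 * <pre>{@code
 * client.getNamespacePropertiesWithResponse()
 *     .subscribe(response -> System.out.println("HTTP " + response.getStatusCode()
 *         + ", namespace: " + response.getValue().getName()));
 * }</pre>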
 * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only the following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types are returned as their string values. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) { return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue()); } /** * Gets a rule from the service namespace. * * Only the following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types are returned as their string values. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Gets information about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) { return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request.
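 * <p>Sketch of a possible call; the entity names are placeholders, {@code client} is assumed to be an existing
 * client instance, and the accessor shown is only for illustration.</p>
 * <pre>{@code
 * client.getSubscriptionWithResponse("orders-topic", "audit-subscription")
 *     .subscribe(response ->
 *         System.out.println("HTTP " + response.getStatusCode() + " for subscription "
 *             + response.getValue().getSubscriptionName()));
 * }</pre>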
* @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity())); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. 
* @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. 
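 * <p>Hypothetical sketch of checking for a topic before creating it; {@code client} and the topic name are
 * assumptions, and the create-if-missing policy is only for illustration.</p>
 * <pre>{@code
 * client.getTopicExists("orders-topic")
 *     .filter(exists -> !exists)
 *     .flatMap(exists -> client.createTopic("orders-topic"))
 *     .subscribe(topic -> System.out.println("Created missing topic: " + topic.getName()));
 * }</pre>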
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
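 * <p>Illustrative sketch, assuming an existing {@code client}; the topic and subscription names are placeholders.</p>
 * <pre>{@code
 * client.listRules("orders-topic", "audit-subscription")
 *     .subscribe(rule -> System.out.println("Rule: " + rule.getName()));
 * }</pre>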
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
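 * <p>A hypothetical get-modify-update sketch; {@code client}, the queue name, and the particular setter used are
 * assumptions about which properties are updatable.</p>
 * <pre>{@code
 * client.getQueue("orders-queue")
 *     .flatMap(queue -> {
 *         // Assumed updatable property; adjust to the properties listed above.
 *         queue.setMaxDeliveryCount(20);
 *         return client.updateQueue(queue);
 *     })
 *     .subscribe(updated -> System.out.println("Updated queue: " + updated.getName()));
 * }</pre>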
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
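 * <p>Sketch only: a possible get-modify-update flow for a topic. {@code client}, the topic name, and the
 * {@code setDefaultMessageTimeToLive} call (using {@code java.time.Duration}) are assumptions for illustration.</p>
 * <pre>{@code
 * client.getTopic("orders-topic")
 *     .flatMap(topic -> {
 *         topic.setDefaultMessageTimeToLive(Duration.ofDays(2)); // assumed updatable property
 *         return client.updateTopicWithResponse(topic);
 *     })
 *     .subscribe(response -> System.out.println("Update returned HTTP " + response.getStatusCode()));
 * }</pre>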
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(createQueueOptions.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); createQueueOptions.setForwardTo(String.format("https: createQueueOptions.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(createQueueOptions.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(String.format("https: createQueueOptions.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(subscriptionOptions.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); subscriptionOptions.setForwardTo(String.format("https: subscriptionOptions.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(subscriptionOptions.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(String.format("https: subscriptionOptions.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
 */ Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (topicOptions == null) { return monoError(logger, new NullPointerException("'topicOptions' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(topic); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param queueName Name of queue to delete. * @param context Context to pass into request. * * @return A Mono that completes when the queue is deleted. */ Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(queueName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a rule with its context. * * @param topicName Name of the topic associated with the rule. * @param subscriptionName Name of the subscription for the rule. * @param ruleName Name of the rule to delete. * @param context Context to pass into request. * * @return A Mono that completes when the rule is deleted.
 */ Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a subscription with its context. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Context to pass into request. * * @return A Mono that completes when the subscription is deleted. */ Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a topic with its context. * * @param topicName Name of topic to delete. * @param context Context to pass into request. * * @return A Mono that completes when the topic is deleted.
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
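 * <p>For orientation, a hypothetical caller-side sketch of the public exists check that is built on top of this
 * helper and {@code getEntityExistsWithResponse}; {@code client} and the queue name are assumptions.</p>
 * <pre>{@code
 * client.getQueueExistsWithResponse("orders-queue")
 *     .subscribe(response -> System.out.println("Exists: " + response.getValue()
 *         + " (HTTP " + response.getStatusCode() + ")"));
 * }</pre>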
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
 */ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", subscriptionName, topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}.
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
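* <p>Sketch (added for clarity; not from the original sources): because the update is a full replace of the entity description, callers are expected to fetch the current queue, modify it, and send every desired property back. This assumes public {@code getQueue}/{@code updateQueue} wrappers around these internal methods and a {@code client} instance in scope:</p>
* <pre>{@code
* client.getQueue("orders-queue")
*     .flatMap(properties -> client.updateQueue(properties))  // resend the complete description
*     .subscribe();
* }</pre>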
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(queue.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); queue.setForwardTo(String.format("https: queue.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(queue.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); queue.setForwardDeadLetteredMessagesTo(String.format("https: queue.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final HttpHeaders supplementaryAuthHeaders = new HttpHeaders(); Context additionalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); if (!CoreUtils.isNullOrEmpty(subscription.getForwardTo())) { addAdditionalAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); subscription.setForwardTo(String.format("https: subscription.getForwardTo())); } if (!CoreUtils.isNullOrEmpty(subscription.getForwardDeadLetteredMessagesTo())) { addAdditionalAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, supplementaryAuthHeaders); subscription.setForwardDeadLetteredMessagesTo(String.format("https: subscription.getForwardDeadLetteredMessagesTo())); } if (supplementaryAuthHeaders.getSize() != 0) { additionalContext = additionalContext.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, supplementaryAuthHeaders); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", additionalContext) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, SERVICE_BUS_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
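* <p>Note (added for clarity): {@code extractPage} above derives the continuation token from the feed's "next" link by reading its {@code $skip} query parameter, and the next-page methods parse that token back into a skip offset. A minimal paging sketch using the methods defined earlier in this class (a {@code Context} instance is assumed to be in scope):</p>
* <pre>{@code
* listQueuesFirstPage(context)
*     .flatMap(page -> listQueuesNextPage(page.getContinuationToken(), context))
*     .subscribe();
* }</pre>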
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** Adds the additional authentication headers needed for various types of forwarding options. * * @param headerName Name of the auth header */ private void addAdditionalAuthHeader(String headerName, HttpHeaders headers) { final String scope; if (tokenCredential instanceof ServiceBusSharedKeyCredential) { scope = String.format("https: } else { scope = ServiceBusConstants.AZURE_ACTIVE_DIRECTORY_SCOPE; } final Mono<AccessToken> tokenMono = tokenCredential.getToken(new TokenRequestContext().addScopes(scope)); final AccessToken token = tokenMono.block(ServiceBusConstants.OPERATION_TIMEOUT); if (headers == null || token == null) { return; } headers.add(headerName, token.getToken()); } /** * Given an XML title element, returns the XML text inside. Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". * * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. 
*/ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
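/*
 * Illustrative usage sketch (added for clarity; not part of the original sources). It exercises the
 * public async surface shown in the class below -- createQueue, getQueueExists, getQueue and
 * deleteQueue -- chained with Reactor operators. How the client instance is obtained is out of scope
 * here and assumed; the queue name is a placeholder.
 */
class ServiceBusAdministrationQueueLifecycleSketch {
    void run(ServiceBusAdministrationAsyncClient client) {
        // Create the queue, confirm it exists, read its properties back, then delete it.
        client.createQueue("orders-queue")
            .then(client.getQueueExists("orders-queue"))
            .then(client.getQueue("orders-queue"))
            .then(client.deleteQueue("orders-queue"))
            .subscribe();
    }
}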
class ServiceBusAdministrationAsyncClient { private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. 
* @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. 
* @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. 
* * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
 * @throws IllegalArgumentException if {@code queueName} is an empty string.
     * @throws NullPointerException if {@code queueName} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) {
        return getEntityExistsWithResponse(getQueueWithResponse(queueName));
    }

    /**
     * Gets runtime properties about the queue.
     *
     * @param queueName Name of queue to get information about.
     *
     * @return A Mono that completes with runtime properties about the queue.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws ResourceNotFoundException if the {@code queueName} does not exist.
     * @see <a href="https:
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) {
        return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue());
    }

    /**
     * Gets runtime properties about the queue along with its HTTP response.
     *
     * @param queueName Name of queue to get information about.
     *
     * @return A Mono that completes with runtime properties about the queue and the associated HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws ResourceNotFoundException if the {@code queueName} does not exist.
     * @see <a href="https:
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) {
        return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new));
    }

    /**
     * Gets information about the Service Bus namespace.
     *
     * @return A Mono that completes with information about the Service Bus namespace.
     * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace.
     * @throws HttpResponseException If error occurred processing the request.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<NamespaceProperties> getNamespaceProperties() {
        return getNamespacePropertiesWithResponse().map(Response::getValue);
    }

    /**
     * Gets information about the Service Bus namespace along with its HTTP response.
     *
     * @return A Mono that completes with information about the namespace and the associated HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() {
        return withContext(this::getNamespacePropertiesWithResponse);
    }

    /**
     * Gets a rule from the service namespace.
     *
     * Only the following data types are deserialized in Filters and Action parameters - string, int, long, boolean,
     * double, and OffsetDateTime. Other data types are returned as their string values.
     *
     * @param topicName The name of the topic relative to service bus namespace.
     * @param subscriptionName The subscription name the rule belongs to.
 * @param ruleName The name of the rule to retrieve.
     *
     * @return The associated rule.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) {
        return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue());
    }

    /**
     * Gets a rule from the service namespace.
     *
     * Only the following data types are deserialized in Filters and Action parameters - string, int, long, boolean,
     * double, and OffsetDateTime. Other data types are returned as their string values.
     *
     * @param topicName The name of the topic relative to service bus namespace.
     * @param subscriptionName The subscription name the rule belongs to.
     * @param ruleName The name of the rule to retrieve.
     *
     * @return The associated rule with the corresponding HTTP response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName,
        String ruleName) {
        return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context));
    }

    /**
     * Gets information about the subscription.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of subscription to get information about.
     *
     * @return A Mono that completes with information about the subscription.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
     * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
     * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}.
     * @see <a href="https:
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) {
        return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue);
    }

    /**
     * Gets information about the subscription along with its HTTP response.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of subscription to get information about.
     *
     * @return A Mono that completes with information about the subscription and the associated HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
     * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
     * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist.
     * @see <a href="https:
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName,
        String subscriptionName) {
        return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context,
            Function.identity()));
    }

    /**
     * Gets whether or not a subscription within a topic exists.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of the subscription.
     *
     * @return A Mono that completes indicating whether or not the subscription exists.
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. 
* * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = createQueueOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); createQueueOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = createQueueOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
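     * <p>When {@code forwardTo} or {@code forwardDeadLetteredMessagesTo} is set on the options, the entity name is
     * resolved to an absolute URL and the corresponding supplementary authorization header is added to the request
     * context before the subscription is created.</p>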
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscriptionOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscriptionOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscriptionOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
     */
    Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions,
        Context context) {
        if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null."));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty."));
        }

        if (topicOptions == null) {
            return monoError(logger, new NullPointerException("'topicOptions' cannot be null"));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions);
        final CreateTopicBodyContent content = new CreateTopicBodyContent()
            .setType(CONTENT_TYPE)
            .setTopicDescription(topic);
        final CreateTopicBody createEntity = new CreateTopicBody()
            .setContent(content);

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(this::deserializeTopic);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a queue with its context.
     *
     * @param queueName Name of queue to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the queue is deleted.
     */
    Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) {
        if (queueName == null) {
            return monoError(logger, new NullPointerException("'queueName' cannot be null"));
        } else if (queueName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return entityClient.deleteWithResponseAsync(queueName, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> {
                    return new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                        response.getHeaders(), null);
                });
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a rule with its context.
     *
     * @param topicName Name of the topic associated with the rule.
     * @param subscriptionName Name of the subscription for the rule.
     * @param ruleName Name of the rule.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the rule is deleted.
     */
    Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName,
        Context context) {
        if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null"));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string."));
        } else if (subscriptionName == null) {
            return monoError(logger, new NullPointerException("'subscriptionName' cannot be null"));
        } else if (subscriptionName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string."));
        } else if (ruleName == null) {
            return monoError(logger, new NullPointerException("'ruleName' cannot be null"));
        } else if (ruleName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a subscription with its context.
     *
     * @param topicName Name of topic associated with subscription to delete.
     * @param subscriptionName Name of subscription to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the subscription is deleted.
     */
    Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) {
        if (subscriptionName == null) {
            return monoError(logger, new NullPointerException("'subscriptionName' cannot be null"));
        } else if (subscriptionName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string."));
        } else if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null"));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName,
                withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a topic with its context.
     *
     * @param topicName Name of topic to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the topic is deleted.
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
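     * <p>The {@code mapper} argument converts the deserialized {@link QueueProperties} into the value returned in
     * the response; when the service returns no matching entry, the result is surfaced as a
     * {@link ResourceNotFoundException}.</p>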
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
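     * <p>The continuation token is the number of entries to skip in the feed, encoded as a string; it is parsed
     * with {@code Integer.parseInt(continuationToken)} before the next page is requested.</p>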
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
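     * <p>The update is sent as a full PUT of the queue description, which is why every desired property value must
     * be present on {@code queue}; forwarding entities, when set, are resolved to absolute URLs and the matching
     * supplementary authorization headers are added to the request context before the request is sent.</p>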
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = queue.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscription.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscription.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscription.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscription.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Check that the additional headers field is present and add the additional auth header * * @param headerName name of the header to be added * @param context current request context * * @return boolean representing the outcome of adding header operation */ private void addSupplementaryAuthHeader(String headerName, String entity, Context context) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY) .ifPresent(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; customHttpHeaders.add(headerName, entity); } }); } /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. 
* * @return Forward to Entity represented as an absolute URL */ private String getAbsoluteUrlFromEntity(String entity) { try { URL url = new URL(entity); return url.toString(); } catch (MalformedURLException ex) { } UrlBuilder urlBuilder = new UrlBuilder(); urlBuilder.setScheme("https"); urlBuilder.setHost(managementClient.getEndpoint()); urlBuilder.setPath(entity); try { URL url = urlBuilder.toUrl(); return url.toString(); } catch (MalformedURLException ex) { logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'", managementClient.getEndpoint(), entity); logger.logThrowableAsError(ex); } return null; } /** * Given an XML title element, returns the XML text inside. Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". * * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. */ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
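The paging helpers above encode the continuation token as the feed's $skip value: extractPage pulls it from the "next" link, FeedPage returns it from getContinuationToken, and listQueuesNextPage parses it back into an int. The following is a minimal, hypothetical driver sketch of that round trip, written as if it lived next to the package-private helpers in this class; it is not part of the SDK's public paging surface.

    // Hypothetical helper, shown only to illustrate the $skip round trip; not part of the client.
    Flux<QueueProperties> listAllQueues(Context context) {
        return listQueuesFirstPage(context)
            .expand(page -> {
                // getContinuationToken() is String.valueOf(skip), or null on the last page.
                final String token = page.getContinuationToken();
                return token == null
                    ? Mono.<PagedResponse<QueueProperties>>empty()
                    : listQueuesNextPage(token, context);
            })
            .flatMapIterable(PagedResponse::getElements);
    }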
Same here: could we use the fluent API and chain the `addData` calls? (A sketch follows the target line below.)

Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = queue.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } }
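The update methods above pass supplementary authorization to the HTTP pipeline by parking a mutable HttpHeaders on the Context under AZURE_REQUEST_HTTP_HEADERS_KEY and then mutating it in addSupplementaryAuthHeader. Below is a self-contained sketch of that hand-off; the key string and header name are placeholders for illustration, not the SDK's constants.

    import com.azure.core.http.HttpHeaders;
    import com.azure.core.util.Context;

    public final class SupplementaryHeaderSketch {
        // Placeholder for AZURE_REQUEST_HTTP_HEADERS_KEY; the real constant lives in azure-core.
        private static final String HTTP_HEADERS_KEY = "request-http-headers";

        public static void main(String[] args) {
            // Park a mutable HttpHeaders on the context, as updateQueueWithResponse does above.
            Context context = Context.NONE.addData(HTTP_HEADERS_KEY, new HttpHeaders());

            // addSupplementaryAuthHeader-style mutation: look the headers up and add the entity name.
            context.getData(HTTP_HEADERS_KEY).ifPresent(headers -> {
                if (headers instanceof HttpHeaders) {
                    ((HttpHeaders) headers).add("Supplementary-Authorization", "target-queue");
                }
            });

            // The same HttpHeaders instance is visible to whatever reads the context later.
            context.getData(HTTP_HEADERS_KEY)
                .ifPresent(headers -> System.out.println(((HttpHeaders) headers).toMap()));
        }
    }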
final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders());
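What the comment is asking for, sketched against the target line above: drop the intermediate contextWithTrace local and chain the two addData calls, mirroring the fluent form already used by updateQueueWithResponse elsewhere in this file.

    // Reviewer's suggestion, sketched: chain the addData calls instead of a separate contextWithTrace.
    final Context contextWithHeaders = context
        .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE)
        .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders());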
return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); }
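The guard clauses above follow the monoError convention used throughout this client: validation failures are returned as an erroring Mono rather than thrown, so they only surface when the caller subscribes. A brief caller-side sketch, assuming package-private access to updateQueueWithResponse(QueueProperties, Context):

    // Hypothetical caller-side view: the NullPointerException arrives via onError, not as a thrown exception.
    client.updateQueueWithResponse(null, Context.NONE)
        .subscribe(
            response -> System.out.println("Updated: " + response.getValue().getName()),
            error -> System.err.println("Validation failed lazily: " + error.getMessage()));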
class ServiceBusAdministrationAsyncClient { private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. 
* @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. 
* @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. 
* * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. 
* @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) { return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue()); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) { return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity())); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. 
* * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = createQueueOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); createQueueOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = createQueueOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
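     * <p>A minimal usage sketch for creating a rule with a SQL filter. The {@code client} instance, the entity
     * names, and the exact public {@code createRule} overload (including its parameter order) are illustrative
     * assumptions rather than something defined in this class.</p>
     * <pre>{@code
     * // Hypothetical names; assumes a createRule overload that accepts CreateRuleOptions.
     * CreateRuleOptions ruleOptions = new CreateRuleOptions();
     * ruleOptions.setFilter(new SqlRuleFilter("priority > 3"));
     *
     * client.createRule("orders-topic", "high-priority-sub", "priority-rule", ruleOptions)
     *     .subscribe(rule -> System.out.println("Created rule: " + rule.getName()));
     * }</pre>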
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
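     * <p>A sketch of creating a subscription that auto-forwards to another entity, which is the scenario the
     * supplementary authorization handling below supports. The {@code client} instance, the entity names, and the
     * public {@code createSubscription} overload are assumptions for illustration.</p>
     * <pre>{@code
     * CreateSubscriptionOptions subscriptionOptions = new CreateSubscriptionOptions();
     * subscriptionOptions.setForwardTo("audit-queue"); // resolved to an absolute entity URL before the request is sent
     *
     * client.createSubscription("orders-topic", "audit-sub", subscriptionOptions)
     *     .subscribe(subscription ->
     *         System.out.println("Forwarding to: " + subscription.getForwardTo()));
     * }</pre>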
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscriptionOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscriptionOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscriptionOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
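     * <p>A sketch of creating a topic and inspecting the raw HTTP status code through the {@code WithResponse}
     * variant. The {@code client} instance, the topic name, and the public {@code createTopicWithResponse}
     * overload are assumptions for illustration.</p>
     * <pre>{@code
     * client.createTopicWithResponse("orders-topic", new CreateTopicOptions())
     *     .subscribe(response -> {
     *         System.out.println("Status code: " + response.getStatusCode());
     *         System.out.println("Created topic: " + response.getValue().getName());
     *     });
     * }</pre>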
 */
    Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions,
        Context context) {
        if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null."));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty."));
        }

        if (topicOptions == null) {
            return monoError(logger, new NullPointerException("'topicOptions' cannot be null"));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions);
        final CreateTopicBodyContent content = new CreateTopicBodyContent()
            .setType(CONTENT_TYPE)
            .setTopicDescription(topic);
        final CreateTopicBody createEntity = new CreateTopicBody()
            .setContent(content);
        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(this::deserializeTopic);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a queue with its context.
     *
     * @param queueName Name of queue to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the queue is deleted.
     */
    Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) {
        if (queueName == null) {
            return monoError(logger, new NullPointerException("'queueName' cannot be null"));
        } else if (queueName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return entityClient.deleteWithResponseAsync(queueName, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> {
                    return new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                        response.getHeaders(), null);
                });
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a rule with its context.
     *
     * @param topicName Name of the topic associated with the rule.
     * @param subscriptionName Name of the subscription for the rule.
     * @param ruleName Name of the rule.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the rule is deleted.
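     * <p>A sketch of removing a subscription's default rule so that only custom rules filter messages. The
     * {@code $Default} rule name follows the usual Service Bus convention for the rule created alongside a
     * subscription; the {@code client} instance, the entity names, and the public {@code deleteRule} overload are
     * assumptions.</p>
     * <pre>{@code
     * client.deleteRule("orders-topic", "high-priority-sub", "$Default")
     *     .subscribe(unused -> { },
     *         error -> System.err.println("Failed to remove the default rule: " + error),
     *         () -> System.out.println("Default rule removed."));
     * }</pre>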
 */
    Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName,
        Context context) {
        if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null"));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string."));
        } else if (subscriptionName == null) {
            return monoError(logger, new NullPointerException("'subscriptionName' cannot be null"));
        } else if (subscriptionName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string."));
        } else if (ruleName == null) {
            return monoError(logger, new NullPointerException("'ruleName' cannot be null"));
        } else if (ruleName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a subscription with its context.
     *
     * @param topicName Name of topic associated with subscription to delete.
     * @param subscriptionName Name of subscription to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the subscription is deleted.
     */
    Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) {
        if (subscriptionName == null) {
            return monoError(logger, new NullPointerException("'subscriptionName' cannot be null"));
        } else if (subscriptionName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string."));
        } else if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null"));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName,
                withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a topic with its context.
     *
     * @param topicName Name of topic to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the topic is deleted.
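     * <p>A sketch that only issues the delete when the topic exists, combining {@code getTopicExists} with
     * {@code deleteTopicWithResponse} from this client. The {@code client} instance and the topic name are
     * illustrative.</p>
     * <pre>{@code
     * client.getTopicExists("orders-topic")
     *     .filter(exists -> exists)
     *     .flatMap(exists -> client.deleteTopicWithResponse("orders-topic"))
     *     .subscribe(response -> System.out.println("Delete returned status: " + response.getStatusCode()),
     *         error -> System.err.println("Delete failed: " + error));
     * }</pre>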
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
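     * <p>Alongside the plain {@code getQueue} lookup, a sketch of reading a queue's runtime properties such as
     * the active message count. The {@code client} instance, the queue name, and the {@code getActiveMessageCount}
     * accessor on {@link QueueRuntimeProperties} are assumptions for illustration.</p>
     * <pre>{@code
     * client.getQueueRuntimeProperties("orders-queue")
     *     .subscribe(runtimeProperties ->
     *         System.out.println("Active messages: " + runtimeProperties.getActiveMessageCount()));
     * }</pre>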
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
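     * <p>A sketch of treating a missing subscription as an empty result rather than an error, using the
     * {@link ResourceNotFoundException} this lookup surfaces. The {@code client} instance and the entity names are
     * illustrative.</p>
     * <pre>{@code
     * client.getSubscription("orders-topic", "audit-sub")
     *     .onErrorResume(ResourceNotFoundException.class, error -> Mono.empty())
     *     .switchIfEmpty(Mono.fromRunnable(() -> System.out.println("Subscription not found.")))
     *     .subscribe(subscription ->
     *         System.out.println("Found subscription: " + subscription.getSubscriptionName()));
     * }</pre>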
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
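     * <p>A sketch of reading a topic's runtime properties, for example to see how many subscriptions it has. The
     * {@code client} instance, the topic name, and the {@code getSubscriptionCount} accessor on
     * {@link TopicRuntimeProperties} are assumptions for illustration.</p>
     * <pre>{@code
     * client.getTopicRuntimeProperties("orders-topic")
     *     .subscribe(runtimeProperties ->
     *         System.out.println("Subscription count: " + runtimeProperties.getSubscriptionCount()));
     * }</pre>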
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
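     * <p>The public listing methods return a {@link PagedFlux}, and the skip-based continuation token handled
     * here backs the page-by-page walk shown below. The {@code client} instance and the entity names are
     * illustrative.</p>
     * <pre>{@code
     * client.listRules("orders-topic", "high-priority-sub")
     *     .byPage()
     *     .subscribe(page -> {
     *         System.out.println("Continuation token: " + page.getContinuationToken());
     *         page.getElements().forEach(rule -> System.out.println(" - " + rule.getName()));
     *     });
     * }</pre>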
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
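     * <p>A sketch of the get-modify-update flow this method expects: fetch the current {@link QueueProperties},
     * change only the desired settings, then send the full description back with {@code updateQueue}. The
     * {@code client} instance and the entity names are illustrative.</p>
     * <pre>{@code
     * client.getQueue("orders-queue")
     *     .flatMap(queue -> {
     *         queue.setForwardDeadLetteredMessagesTo("dead-letter-audit");
     *         return client.updateQueue(queue);
     *     })
     *     .subscribe(updated -> System.out.println("Updated queue: " + updated.getName()));
     * }</pre>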
 */
    Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) {
        if (queue == null) {
            return monoError(logger, new NullPointerException("'queue' cannot be null"));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);
        final Context contextWithHeaders
            = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders());
        final String forwardToEntity = queue.getForwardTo();
        if (!CoreUtils.isNullOrEmpty(forwardToEntity)) {
            addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity,
                contextWithHeaders);
            queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity));
        }

        final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo();
        if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) {
            addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity,
                contextWithHeaders);
            queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity));
        }

        final QueueDescription queueDescription = EntityHelper.toImplementation(queue);
        final CreateQueueBodyContent content = new CreateQueueBodyContent()
            .setType(CONTENT_TYPE)
            .setQueueDescription(queueDescription);
        final CreateQueueBody createEntity = new CreateQueueBody()
            .setContent(content);

        try {
            return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> deserializeQueue(response));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Updates a rule with its context.
     *
     * @param rule Information about the rule to update. You must provide all the property values that are desired
     *     on the updated entity. Any values not provided are set to the service default values.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes with the updated {@link RuleProperties}.
     */
    Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName,
        RuleProperties rule, Context context) {
        if (rule == null) {
            return monoError(logger, new NullPointerException("'rule' cannot be null"));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final RuleDescription implementation = EntityHelper.toImplementation(rule);
        final CreateRuleBodyContent content = new CreateRuleBodyContent()
            .setType(CONTENT_TYPE)
            .setRuleDescription(implementation);
        final CreateRuleBody ruleBody = new CreateRuleBody()
            .setContent(content);

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(),
                ruleBody, "*", withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> deserializeRule(response));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Updates a subscription with its context.
     *
     * @param subscription Information about the subscription to update. You must provide all the property values
     *     that are desired on the updated entity. Any values not provided are set to the service default values.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes with the updated {@link SubscriptionProperties}.
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscription.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscription.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscription.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscription.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Check that the additional headers field is present and add the additional auth header * * @param headerName name of the header to be added * @param context current request context * * @return boolean representing the outcome of adding header operation */ private void addSupplementaryAuthHeader(String headerName, String entity, Context context) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY) .map(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; customHttpHeaders.add(headerName, entity); } return null; }); } /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. 
* * @return Forward to Entity represented as an absolute URL */ private String getAbsoluteUrlFromEntity(String entity) { try { URL url = new URL(entity); return url.toString(); } catch (MalformedURLException ex) { } UrlBuilder urlBuilder = new UrlBuilder(); urlBuilder.setScheme("https"); urlBuilder.setHost(managementClient.getEndpoint()); urlBuilder.setPath(entity); try { URL url = urlBuilder.toUrl(); return url.toString(); } catch (MalformedURLException ex) { logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'", managementClient.getEndpoint(), entity); logger.logThrowableAsError(ex); } return null; } /** * Given an XML title element, returns the XML text inside. Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". * * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. */ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
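The paging helpers above encode the next page as a "$skip" offset: extractPage pulls it out of the feed's "next" link and the listXxxNextPage methods parse it back with Integer.parseInt. The following standalone sketch shows that extraction step in isolation; the class name and sample URL are illustrative only and are not part of the client.

// Standalone sketch (not part of the client): recovering the "$skip" continuation value from a
// hypothetical feed "next" link, mirroring what extractPage(...) does above.
import java.net.URL;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Optional;

public final class SkipTokenSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical "next" link; only its query string matters for this sketch.
        final URL nextLink = new URL(
            "https://contoso.servicebus.windows.net/$Resources/queues?api-version=2021-05&$skip=100&$top=100");

        // Decode the query and look for the "$skip" parameter, splitting on both "&" and the
        // HTML-escaped "&amp;" form, as the helper above does.
        final String query = URLDecoder.decode(nextLink.getQuery(), StandardCharsets.UTF_8.name());
        final Optional<Integer> skip = Arrays.stream(query.split("&amp;|&"))
            .map(part -> part.split("=", 2))
            .filter(parts -> parts.length == 2 && parts[0].equalsIgnoreCase("$skip"))
            .map(parts -> Integer.valueOf(parts[1]))
            .findFirst();

        // An absent value means there is no further page; otherwise the skip value becomes the
        // page's continuation token.
        System.out.println("Continuation token: " + skip.map(String::valueOf).orElse("<none>"));
    }
}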
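The list* operations in the client below then stitch a first-page call and a continuation-token call into a PagedFlux. This is a hedged, generic sketch of that two-function wiring; pageAll and fetchPage are illustrative names standing in for helpers such as listQueues(skip, context), not SDK members.

// Generic sketch of the first-page / next-page PagedFlux pattern used by the list* operations.
import com.azure.core.http.rest.PagedFlux;
import com.azure.core.http.rest.PagedResponse;
import java.util.function.Function;
import reactor.core.publisher.Mono;

final class PagedFluxSketch {
    static <T> PagedFlux<T> pageAll(Function<Integer, Mono<PagedResponse<T>>> fetchPage) {
        return new PagedFlux<>(
            // The first page always starts at skip = 0.
            () -> fetchPage.apply(0),
            token -> {
                // An empty or missing continuation token means there are no more pages.
                if (token == null || token.isEmpty()) {
                    return Mono.empty();
                }
                // Otherwise the token carries the skip offset of the next page.
                return fetchPage.apply(Integer.parseInt(token));
            });
    }
}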
class ServiceBusAdministrationAsyncClient { private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. 
* @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. 
* @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. 
* * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. 
* @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) { return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue()); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) { return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity())); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. 
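* <p>A minimal usage sketch, assuming an existing {@code ServiceBusAdministrationAsyncClient}
* named {@code client}, a hypothetical topic name, and that the subscription count accessor is the
* runtime property of interest:</p>
* <pre>{@code
* client.getTopicRuntimeProperties("orders-topic")
*     .subscribe(runtime -> System.out.println("Subscription count: " + runtime.getSubscriptionCount()));
* }</pre>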
* * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
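* <p>A minimal usage sketch, assuming an existing {@code ServiceBusAdministrationAsyncClient}
* named {@code client} and hypothetical entity names; the returned {@link PagedFlux} fetches further
* pages lazily as they are consumed:</p>
* <pre>{@code
* client.listRules("orders-topic", "audit-subscription")
*     .subscribe(rule -> System.out.println("Rule: " + rule.getName()));
* }</pre>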
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
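* <p>A minimal sketch of the suggested get-modify-update flow described above, assuming an existing
* {@code ServiceBusAdministrationAsyncClient} named {@code client}, a hypothetical queue name, and that
* the max delivery count is one of the properties being changed:</p>
* <pre>{@code
* client.getQueue("orders-queue")
*     .map(queue -> {
*         queue.setMaxDeliveryCount(15);
*         return queue;
*     })
*     .flatMap(client::updateQueue)
*     .subscribe(updated -> System.out.println("Updated max delivery count: " + updated.getMaxDeliveryCount()));
* }</pre>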
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = createQueueOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); createQueueOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = createQueueOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
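* <p>A minimal sketch of how this helper is invoked; the entity names and the SQL expression are
* hypothetical, and {@code SqlRuleFilter} is assumed to be the filter type supplied by the caller:</p>
* <pre>{@code
* CreateRuleOptions options = new CreateRuleOptions();
* options.setFilter(new SqlRuleFilter("priority = 'high'"));
* createRuleWithResponse("orders-topic", "audit-subscription", "high-priority-only", options, Context.NONE)
*     .subscribe(response -> System.out.println("Created rule: " + response.getValue().getName()));
* }</pre>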
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
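* <p>A minimal sketch of how this helper is invoked when message forwarding is requested; entity names
* are hypothetical. As the implementation below shows, setting a forward-to entity causes the
* supplementary authorization header to be added and the entity name to be rewritten as an absolute
* URL before the request is sent:</p>
* <pre>{@code
* CreateSubscriptionOptions options = new CreateSubscriptionOptions();
* options.setForwardTo("billing-queue");
* createSubscriptionWithResponse("orders-topic", "billing-forwarder", options, Context.NONE)
*     .subscribe(response -> System.out.println("Created: " + response.getValue().getSubscriptionName()));
* }</pre>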
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscriptionOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscriptionOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscriptionOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (topicOptions == null) { return monoError(logger, new NullPointerException("'topicOptions' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(topic); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param queueName Name of queue to delete. * @param context Context to pass into request. * * @return A Mono that completes when the queue is deleted. */ Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(queueName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param topicName Name of topic to delete. * @param subscriptionName Name of the subscription for the rule. * @param ruleName Name of the rule. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. 
*/ Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a subscription with its context. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. */ Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a topic with its context. * * @param topicName Name of topic to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
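* <p>Design note with an illustrative sketch (both call sites appear earlier in this class): the same
* fetch is reused for the two public views of a subscription by swapping the {@code mapper} argument:</p>
* <pre>{@code
* // full subscription properties
* getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity());
* // runtime-only view
* getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new);
* }</pre>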
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
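* <p>Illustrative note on the continuation token, based on the implementation below: the token is simply
* the number of already-returned entries, taken from the {@code $skip} query parameter of the feed's
* "next" link and parsed with {@code Integer.parseInt}. Entity names here are hypothetical:</p>
* <pre>{@code
* // token "100" resumes the listing after the first 100 rules
* listRulesNextPage("orders-topic", "audit-subscription", "100", Context.NONE);
* }</pre>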
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) {
if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); }
final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders());
final String forwardToEntity = queue.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); }
final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); }
final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content);
try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } }
/** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); }
final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content);
final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);
try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } }
/** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}.
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscription.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscription.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscription.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscription.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Check that the additional headers field is present and add the additional auth header * * @param headerName name of the header to be added * @param context current request context * * @return boolean representing the outcome of adding header operation */ private void addSupplementaryAuthHeader(String headerName, String entity, Context context) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY) .ifPresent(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; customHttpHeaders.add(headerName, entity); } }); } /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. 
* * @return Forward to Entity represented as an absolute URL */ private String getAbsoluteUrlFromEntity(String entity) { try { URL url = new URL(entity); return url.toString(); } catch (MalformedURLException ex) { } UrlBuilder urlBuilder = new UrlBuilder(); urlBuilder.setScheme("https"); urlBuilder.setHost(managementClient.getEndpoint()); urlBuilder.setPath(entity); try { URL url = urlBuilder.toUrl(); return url.toString(); } catch (MalformedURLException ex) { logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'", managementClient.getEndpoint(), entity); logger.logThrowableAsError(ex); } return null; } /** * Given an XML title element, returns the XML text inside. Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". * * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. */ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
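The FeedPage implementation above carries its paging state as the numeric $skip offset in the continuation token. As a rough, standalone sketch of how such a skip-based token can drive sequential paging, the snippet below walks a fake data source page by page; the `fetchPage(skip)` helper and the entity names are illustrative stand-ins and are not part of the Service Bus client shown above.

```java
import java.util.ArrayList;
import java.util.List;

// Minimal sketch of skip-based paging, mirroring how FeedPage encodes the next
// page's $skip value as its continuation token. fetchPage(skip) is a hypothetical
// stand-in for a service call returning at most PAGE_SIZE items.
public final class SkipPagingSketch {

    private static final int PAGE_SIZE = 100;

    public static void main(String[] args) {
        int skip = 0;
        while (true) {
            List<String> page = fetchPage(skip);
            page.forEach(System.out::println);

            if (page.size() < PAGE_SIZE) {
                break; // A short page means no "next" link, so no continuation token.
            }
            skip += page.size(); // Equivalent to following the next link's $skip value.
        }
    }

    private static List<String> fetchPage(int skip) {
        // Fake data source with 250 entities named entity-0 .. entity-249.
        int total = 250;
        int end = Math.min(skip + PAGE_SIZE, total);
        List<String> items = new ArrayList<>();
        for (int i = skip; i < end; i++) {
            items.add("entity-" + i);
        }
        return items;
    }
}
```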
I don't believe this is the right operator. Mapping is a transform; I wouldn't want it to return null if the AZURE_REQUEST_HTTP_HEADERS_KEY existed but was another object type. ```java context.getData("foo").ifPresent(value -> { if (!(value instanceof HttpHeaders)) { return; } HttpHeaders customHttpHeaders = (HttpHeaders) value; customHttpHeaders.add(headerName, entity); }); ```
private void addSupplementaryAuthHeader(String headerName, String entity, Context context) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY) .map(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; customHttpHeaders.add(headerName, entity); } return null; }); }
.map(headers -> {
private void addSupplementaryAuthHeader(String headerName, String entity, Context context) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY) .ifPresent(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; customHttpHeaders.add(headerName, entity); } }); }
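For reference, a small self-contained sketch of the distinction the comment above is making: `Optional.map` is a transform whose result is simply discarded here, while `Optional.ifPresent` states the intent directly (run an action only when a value is present and of the expected type). The class name and the appended strings are illustrative only and are not SDK code; a real `HttpHeaders` is simulated with a `StringBuilder` to keep the example runnable.

```java
import java.util.Optional;

// Standalone illustration of Optional.map (transform) vs Optional.ifPresent (side effect).
public final class OptionalSideEffectSketch {

    public static void main(String[] args) {
        Optional<Object> data = Optional.of(new StringBuilder("headers"));

        // Anti-pattern: using map purely for a side effect. The lambda must return
        // something, so "return null" sneaks in, and the resulting Optional is discarded.
        data.map(value -> {
            if (value instanceof StringBuilder) {
                ((StringBuilder) value).append(":custom-auth");
            }
            return null;
        });

        // Preferred: ifPresent expresses "do this action if a value exists" and returns
        // nothing, so there is no dangling result to ignore or misuse.
        data.ifPresent(value -> {
            if (value instanceof StringBuilder) {
                ((StringBuilder) value).append(":second-auth");
            }
        });

        // Prints "headers:custom-auth:second-auth"; both blocks mutated the same object.
        System.out.println(data.get());
    }
}
```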
class ServiceBusAdministrationAsyncClient { private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. 
* @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. 
* @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. 
* * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. 
* @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) { return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue()); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) { return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity())); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. 
* * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
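 * <p>Illustrative sketch of the suggested get-modify-update flow (the {@code client} variable, the entity name,
 * and the property changed are assumptions; {@code getQueue} is the matching getter defined earlier in this
 * class):</p>
 * <pre>{@code
 * client.getQueue("my-queue")
 *     .flatMap(queue -> {
 *         // Change only what should differ; all other values are sent back unchanged.
 *         queue.setMaxDeliveryCount(15);
 *         return client.updateQueue(queue);
 *     })
 *     .subscribe(updated -> System.out.println("Updated queue: " + updated.getName()));
 * }</pre>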
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
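 * <p>Illustrative sketch (the {@code client} variable, the entity names, and the SQL filter expression are
 * assumptions; {@code getRule} is the matching getter defined earlier in this class):</p>
 * <pre>{@code
 * client.getRule("my-topic", "my-subscription", "my-rule")
 *     .flatMap(rule -> {
 *         rule.setFilter(new SqlRuleFilter("priority > 3"));
 *         return client.updateRule("my-topic", "my-subscription", rule);
 *     })
 *     .subscribe(updated -> System.out.println("Updated rule: " + updated.getName()));
 * }</pre>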
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
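 * <p>Illustrative sketch (the {@code client} variable, the entity names, and the property changed are
 * assumptions; {@code getSubscription} is the matching getter defined earlier in this class):</p>
 * <pre>{@code
 * client.getSubscription("my-topic", "my-subscription")
 *     .flatMap(subscription -> {
 *         subscription.setMaxDeliveryCount(5);
 *         return client.updateSubscriptionWithResponse(subscription);
 *     })
 *     .subscribe(response -> System.out.println("Update returned status: " + response.getStatusCode()));
 * }</pre>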
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
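 * <p>Illustrative sketch (the {@code client} variable, the entity name, and the property changed are
 * assumptions; {@code java.time.Duration} is assumed to be imported):</p>
 * <pre>{@code
 * client.getTopic("my-topic")
 *     .flatMap(topic -> {
 *         topic.setDefaultMessageTimeToLive(Duration.ofDays(5));
 *         return client.updateTopicWithResponse(topic);
 *     })
 *     .subscribe(response -> System.out.println("Updated topic: " + response.getValue().getName()));
 * }</pre>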
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = createQueueOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); createQueueOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = createQueueOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscriptionOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscriptionOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscriptionOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (topicOptions == null) { return monoError(logger, new NullPointerException("'topicOptions' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(topic); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param queueName Name of queue to delete. * @param context Context to pass into request. * * @return A Mono that completes when the queue is deleted. */ Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(queueName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a rule with its context. * * @param topicName Name of the topic the rule belongs to. * @param subscriptionName Name of the subscription for the rule. * @param ruleName Name of the rule. * @param context Context to pass into request. * * @return A Mono that completes when the rule is deleted. 
*/ Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a subscription with its context. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. */ Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a topic with its context. * * @param topicName Name of topic to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
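 * <p>The {@code mapper} parameter lets one helper back both public shapes of the queue getters, mirroring the
 * subscription and topic variants earlier in this class. A sketch (the {@code QueueRuntimeProperties}
 * constructor reference is an assumption based on that pattern):</p>
 * <pre>{@code
 * getQueueWithResponse(queueName, context, Function.identity());         // Response<QueueProperties>
 * getQueueWithResponse(queueName, context, QueueRuntimeProperties::new); // Response<QueueRuntimeProperties>
 * }</pre>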
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
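 * <p>The continuation token used by these paging helpers is simply the number of entries to skip, carried from
 * one page to the next. Illustrative caller-side sketch (the {@code client} variable and entity names are
 * assumptions); {@code getContinuationToken()} surfaces that skip count:</p>
 * <pre>{@code
 * client.listRules("my-topic", "my-subscription")
 *     .byPage()
 *     .subscribe(page -> System.out.println(
 *         page.getValue().size() + " rules in page, next skip token: " + page.getContinuationToken()));
 * }</pre>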
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = queue.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscription.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscription.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscription.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscription.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
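 * <p>Worked example of the paging handshake with {@code extractPage} above (the link shape is an assumption,
 * not an actual response): when the feed's "next" link carries a query string containing {@code $skip=100},
 * that value is decoded and {@code 100} becomes the continuation token that the next-page helpers parse back
 * into a skip count.</p>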
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Check that the additional headers field is present and add the additional auth header * * @param headerName name of the header to be added * @param context current request context * * @return boolean representing the outcome of adding header operation */ /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. * * @return Forward to Entity represented as an absolute URL */ private String getAbsoluteUrlFromEntity(String entity) { try { URL url = new URL(entity); return url.toString(); } catch (MalformedURLException ex) { } UrlBuilder urlBuilder = new UrlBuilder(); urlBuilder.setScheme("https"); urlBuilder.setHost(managementClient.getEndpoint()); urlBuilder.setPath(entity); try { URL url = urlBuilder.toUrl(); return url.toString(); } catch (MalformedURLException ex) { logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'", managementClient.getEndpoint(), entity); logger.logThrowableAsError(ex); } return null; } /** * Given an XML title element, returns the XML text inside. 
Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". * * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. */ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
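// Illustrative sketch (not part of the original source): how the FeedPage continuation token above is
// typically consumed. The token is simply the String form of the next "skip" offset, so resuming a listing
// only requires handing it back to the paged API. The "adminClient" variable is an assumption for the example.
//
// adminClient.listQueues()
//     .byPage()
//     .subscribe(page -> System.out.printf("Fetched %d queues, next skip token: %s%n",
//         page.getElements().stream().count(), page.getContinuationToken()));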
class ServiceBusAdministrationAsyncClient { private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
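 * <p>Illustrative usage sketch (not from the original source; assumes an already-constructed async
 * client named {@code client}):</p>
 * <pre>{@code
 * client.createQueueWithResponse("my-queue", new CreateQueueOptions())
 *     .subscribe(response -> System.out.printf("Status: %d, created queue: %s%n",
 *         response.getStatusCode(), response.getValue().getName()));
 * }</pre>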
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. 
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. 
 * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. 
* @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. 
* * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. 
* @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) { return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue()); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) { return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity())); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists. 
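 * <p>Illustrative sketch (the {@code client} variable is an assumption; not from the original source):</p>
 * <pre>{@code
 * client.getSubscriptionExists("my-topic", "my-subscription")
 *     .subscribe(exists -> System.out.println("Subscription exists: " + exists));
 * }</pre>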
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. 
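 * <p>Illustrative sketch (assumes a client variable {@code client}; the {@code getSubscriptionCount()}
 * accessor on {@link TopicRuntimeProperties} is an assumption for the example):</p>
 * <pre>{@code
 * client.getTopicRuntimeProperties("my-topic")
 *     .subscribe(runtime -> System.out.println("Subscription count: " + runtime.getSubscriptionCount()));
 * }</pre>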
* * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
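 * <p>Illustrative sketch of consuming the returned {@link PagedFlux} (the {@code client} variable is
 * assumed; not from the original source):</p>
 * <pre>{@code
 * client.listRules("my-topic", "my-subscription")
 *     .subscribe(rule -> System.out.println("Rule: " + rule.getName()));
 * }</pre>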
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
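 * <p>Illustrative sketch of the suggested get-modify-update flow described above (the {@code client}
 * variable and the fluent {@code setMaxDeliveryCount} setter are assumptions):</p>
 * <pre>{@code
 * client.getQueue("my-queue")
 *     .map(queue -> queue.setMaxDeliveryCount(5))
 *     .flatMap(client::updateQueue)
 *     .subscribe(updated -> System.out.println("Updated queue: " + updated.getName()));
 * }</pre>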
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = createQueueOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); createQueueOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = createQueueOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscriptionOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscriptionOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscriptionOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (topicOptions == null) { return monoError(logger, new NullPointerException("'topicOptions' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(topic); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param queueName Name of queue to delete. * @param context Context to pass into request. * * @return A Mono that completes when the queue is deleted. */ Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(queueName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param topicName Name of topic to delete. * @param subscriptionName Name of the subscription for the rule. * @param ruleName Name of the rule. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. 
*/ Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a subscription with its context. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. */ Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a topic with its context. * * @param topicName Name of topic to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = queue.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscription.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscription.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscription.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscription.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Check that the additional headers field is present and add the additional auth header * * @param headerName name of the header to be added * @param context current request context * * @return boolean representing the outcome of adding header operation */ /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. * * @return Forward to Entity represented as an absolute URL */ private String getAbsoluteUrlFromEntity(String entity) { try { URL url = new URL(entity); return url.toString(); } catch (MalformedURLException ex) { } UrlBuilder urlBuilder = new UrlBuilder(); urlBuilder.setScheme("https"); urlBuilder.setHost(managementClient.getEndpoint()); urlBuilder.setPath(entity); try { URL url = urlBuilder.toUrl(); return url.toString(); } catch (MalformedURLException ex) { logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'", managementClient.getEndpoint(), entity); logger.logThrowableAsError(ex); } return null; } /** * Given an XML title element, returns the XML text inside. 
Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". * * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. */ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
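The update* javadoc in the class above repeatedly describes the suggested flow of fetching the entity, changing only the required fields, and passing the fully populated object back. A minimal, hypothetical usage sketch of that flow for a queue follows; the builder setup, the "orders" queue name, and the property being changed are illustrative assumptions, not taken from this record.

// Hypothetical sketch of the get -> modify -> update flow described in the javadoc.
// The connection string placeholder, queue name, and chosen property are assumptions.
ServiceBusAdministrationAsyncClient client = new ServiceBusAdministrationClientBuilder()
    .connectionString("<namespace-connection-string>")
    .buildAsyncClient();

client.getQueue("orders")
    .map(queue -> {
        // Change only the fields that should differ; every other value is resent as-is,
        // because update replaces the entire description on the service.
        queue.setMaxDeliveryCount(15);
        return queue;
    })
    .flatMap(client::updateQueue)
    .subscribe(updated -> System.out.println("Updated max delivery count: "
        + updated.getMaxDeliveryCount()));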
Would this be a fatal error? Do we want to silently continue by returning null when we are unable to create a URL? Could the user recover from this?
private String getAbsoluteUrlFromEntity(String entity) {
    try {
        // 'entity' may already be an absolute URL; if so, use it as-is.
        URL url = new URL(entity);
        return url.toString();
    } catch (MalformedURLException ex) {
        // Not an absolute URL; fall through and build one from the namespace endpoint.
    }

    UrlBuilder urlBuilder = new UrlBuilder();
    urlBuilder.setScheme("https");
    urlBuilder.setHost(managementClient.getEndpoint());
    urlBuilder.setPath(entity);

    try {
        URL url = urlBuilder.toUrl();
        return url.toString();
    } catch (MalformedURLException ex) {
        logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'",
            managementClient.getEndpoint(), entity);
        logger.logThrowableAsError(ex);
    }

    return null;
}
logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'",
private String getAbsoluteUrlFromEntity(String entity) {
    try {
        // 'entity' may already be an absolute URL; if so, use it as-is.
        URL url = new URL(entity);
        return url.toString();
    } catch (MalformedURLException ex) {
        // Not an absolute URL; fall through and build one from the namespace endpoint.
    }

    UrlBuilder urlBuilder = new UrlBuilder();
    urlBuilder.setScheme("https");
    urlBuilder.setHost(managementClient.getEndpoint());
    urlBuilder.setPath(entity);

    try {
        URL url = urlBuilder.toUrl();
        return url.toString();
    } catch (MalformedURLException ex) {
        logger.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'",
            managementClient.getEndpoint(), entity);
        logger.logThrowableAsError(ex);
    }

    return null;
}
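One thing worth noting about returning null here is that the result flows straight into createQueueOptions.setForwardTo(...) and setForwardDeadLetteredMessagesTo(...) below, so a URL that cannot be constructed overwrites the caller-supplied forward-to entity with null rather than failing the create call. A minimal sketch of the fail-fast alternative the comment above is hinting at, assuming the same managementClient and logger fields and that a malformed endpoint/entity is not recoverable (the IllegalStateException is an illustrative choice, not the SDK's actual behavior):

private String getAbsoluteUrlFromEntity(String entity) {
    try {
        // 'entity' is already an absolute URL; use it as-is.
        return new URL(entity).toString();
    } catch (MalformedURLException ex) {
        // Fall through and build the URL from the namespace endpoint instead.
    }

    final UrlBuilder urlBuilder = new UrlBuilder()
        .setScheme("https")
        .setHost(managementClient.getEndpoint())
        .setPath(entity);

    try {
        return urlBuilder.toUrl().toString();
    } catch (MalformedURLException ex) {
        // Surface the failure to the caller instead of silently returning null.
        throw logger.logExceptionAsError(new IllegalStateException(String.format(
            "Failed to construct URL using the endpoint:'%s' and entity:'%s'",
            managementClient.getEndpoint(), entity), ex));
    }
}

The trade-off is that any caller which tolerates a missing forward-to address would now see the operation fail, so the current logging-plus-null behavior may be intentional.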
class ServiceBusAdministrationAsyncClient { private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. 
* @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. 
* @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. 
* * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. 
* @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) { return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue()); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) { return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity())); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. 
* * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = createQueueOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); createQueueOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = createQueueOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscriptionOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscriptionOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscriptionOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topic with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (topicOptions == null) { return monoError(logger, new NullPointerException("'topicOptions' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(topic); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a queue with its context. * * @param queueName Name of queue to delete. * @param context Context to pass into request. * * @return A Mono that completes when the queue is deleted. */ Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(queueName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a rule with its context. * * @param topicName Name of the topic associated with the rule. * @param subscriptionName Name of the subscription for the rule. * @param ruleName Name of the rule. * @param context Context to pass into request. * * @return A Mono that completes when the rule is deleted. 
*/ Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a subscription with its context. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Context to pass into request. * * @return A Mono that completes when the subscription is deleted. */ Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a topic with its context. * * @param topicName Name of topic to delete. * @param context Context to pass into request. * * @return A Mono that completes when the topic is deleted. 
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = queue.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithTrace = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final Context contextWithHeaders = contextWithTrace.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscription.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscription.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscription.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscription.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Check that the additional headers field is present and add the additional auth header * * @param headerName name of the header to be added * @param context current request context * * @return boolean representing the outcome of adding header operation */ private void addSupplementaryAuthHeader(String headerName, String entity, Context context) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY) .map(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; customHttpHeaders.add(headerName, entity); } return null; }); } /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. * * @return Forward to Entity represented as an absolute URL */ /** * Given an XML title element, returns the XML text inside. Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". 
* * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. */ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
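The class above (and the public surface repeated below) is a fairly typical Reactor-based management client. As a rough orientation, the sketch that follows shows how a caller might compose the create and update methods; it is illustrative only. The entity names, the getTopic lookup used for the read-modify-write step, and the placement of the sketch class are assumptions rather than code taken from this snippet; only createQueue, createTopic, createSubscription, and updateTopic appear in the listings here.

import reactor.core.publisher.Mono;

/**
 * Illustrative usage sketch, not part of the client above. It assumes it is compiled somewhere the
 * ServiceBusAdministrationAsyncClient type is visible (the published client is public in the SDK).
 */
final class AdministrationUsageSketch {

    // Provisions a queue, a topic, and a subscription using the create* overloads shown in the listing.
    static Mono<Void> provision(ServiceBusAdministrationAsyncClient client) {
        return client.createQueue("orders-queue")                                  // hypothetical entity names
            .then(client.createTopic("orders-topic"))
            .then(client.createSubscription("orders-topic", "audit-subscription"))
            .then();
    }

    // updateTopic replaces the entire entity, so the usual pattern is read-modify-write: fetch the
    // current TopicProperties, change only the updatable fields, then write it back.
    // getTopic(String) is assumed to exist alongside the update methods in the listing.
    static Mono<Void> refreshTopic(ServiceBusAdministrationAsyncClient client) {
        return client.getTopic("orders-topic")
            .flatMap(topic -> {
                // mutate updatable properties on 'topic' here before sending it back
                return client.updateTopic(topic);
            })
            .then();
    }

    private AdministrationUsageSketch() {
    }
}

The read-modify-write shape matters because, as the Javadoc above notes, any property left unset on the update call is reset to the service default.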
class ServiceBusAdministrationAsyncClient { private static final String CONTENT_TYPE = "application/xml"; private static final String QUEUES_ENTITY_TYPE = "queues"; private static final String TOPICS_ENTITY_TYPE = "topics"; private static final int NUMBER_OF_ELEMENTS = 100; private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ClientLogger logger = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. 
* @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. 
* @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. 
* * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
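 *
 * <p>For example, a sketch (illustrative only) of creating the queue only when it is absent, assuming
 * {@code client} is an already constructed {@code ServiceBusAdministrationAsyncClient} and
 * {@code "my-queue"} is a hypothetical name; the convenience overload without the HTTP response is used:</p>
 * <pre>{@code
 * client.getQueueExists("my-queue")
 *     .filter(exists -> !exists)                       // continue only when the queue is absent
 *     .flatMap(exists -> client.createQueue("my-queue"))
 *     .subscribe(queue -> System.out.println("Created queue: " + queue.getName()));
 * }</pre>
 *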
* @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(Response::getValue); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. 
     * @param ruleName The name of the rule to retrieve.
     *
     * @return The associated rule.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) {
        return getRuleWithResponse(topicName, subscriptionName, ruleName).map(Response::getValue);
    }

    /**
     * Gets a rule from the service namespace.
     *
     * Only the following data types are deserialized in Filters and Action parameters - string, int, long, boolean,
     * double, and OffsetDateTime. Other data types are returned as their string values.
     *
     * @param topicName The name of the topic relative to service bus namespace.
     * @param subscriptionName The subscription name the rule belongs to.
     * @param ruleName The name of the rule to retrieve.
     *
     * @return The associated rule with the corresponding HTTP response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName,
        String ruleName) {
        return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context));
    }

    /**
     * Gets information about the subscription.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of subscription to get information about.
     *
     * @return A Mono that completes with information about the subscription.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     *     namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
     * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
     * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}.
     * @see <a href="https:
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) {
        return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue);
    }

    /**
     * Gets information about the subscription along with its HTTP response.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of subscription to get information about.
     *
     * @return A Mono that completes with information about the subscription and the associated HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     *     namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
     * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
     * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist.
     * @see <a href="https:
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName,
        String subscriptionName) {
        return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context,
            Function.identity()));
    }

    /**
     * Gets whether or not a subscription within a topic exists.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of the subscription.
     *
     * @return A Mono that completes indicating whether or not the subscription exists.
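 *
 * <p>For example, a minimal sketch (illustrative only) assuming {@code client} is an already constructed
 * {@code ServiceBusAdministrationAsyncClient}; the topic and subscription names are hypothetical:</p>
 * <pre>{@code
 * client.getSubscriptionExists("my-topic", "my-subscription")
 *     .subscribe(exists -> System.out.println("Subscription present: " + exists));
 * }</pre>
 *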
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) { return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) { return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName)); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties( String topicName, String subscriptionName) { return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName) .map(response -> response.getValue()); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new)); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> getTopic(String topicName) { return getTopicWithResponse(topicName).map(Response::getValue); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, Function.identity())); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getTopicExists(String topicName) { return getTopicExistsWithResponse(topicName).map(Response::getValue); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return A Mono that completes indicating whether or not the topic exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) { return getEntityExistsWithResponse(getTopicWithResponse(topicName)); } /** * Gets runtime properties about the topic. 
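 *
 * <p>For example, a minimal sketch (illustrative only) assuming {@code client} is an already constructed
 * {@code ServiceBusAdministrationAsyncClient}, {@code "my-topic"} is a hypothetical topic name, and
 * {@code getSubscriptionCount} and {@code getSizeInBytes} are assumed accessors on
 * {@code TopicRuntimeProperties}:</p>
 * <pre>{@code
 * client.getTopicRuntimeProperties("my-topic")
 *     .subscribe(properties -> System.out.println("Subscriptions: " + properties.getSubscriptionCount() // assumed accessors
 *         + ", size in bytes: " + properties.getSizeInBytes()));
 * }</pre>
 *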
* * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) { return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue()); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * * @return A Mono that completes with runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) { return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new)); } /** * Fetches all the queues in the Service Bus namespace. * * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>( () -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. 
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(context -> listTopicsFirstPage(context)), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
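 *
 * <p>For example, a sketch (illustrative only) of the suggested get-modify-update flow, assuming
 * {@code client} is an already constructed {@code ServiceBusAdministrationAsyncClient},
 * {@code "my-queue"} is a hypothetical name, and {@code setMaxDeliveryCount} is assumed to be one of the
 * updatable {@code QueueProperties} setters:</p>
 * <pre>{@code
 * client.getQueue("my-queue")
 *     .flatMap(queue -> {
 *         queue.setMaxDeliveryCount(20); // assumed setter; change only what should differ from the current values
 *         return client.updateQueue(queue);
 *     })
 *     .subscribe(updated -> System.out.println("Updated queue: " + updated.getName()));
 * }</pre>
 *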
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> updateQueue(QueueProperties queue) { return updateQueueWithResponse(queue).map(Response::getValue); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) { return withContext(context -> updateQueueWithResponse(queue, context)); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) { return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. 
* @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule) { return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context)); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that returns the updated subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) { return updateSubscriptionWithResponse(subscription).map(Response::getValue); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. 
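 *
 * <p>For example, a sketch (illustrative only) of the suggested get-modify-update flow, assuming
 * {@code client} is an already constructed {@code ServiceBusAdministrationAsyncClient}, the entity names
 * are hypothetical, and {@code setMaxDeliveryCount} is assumed to be one of the updatable
 * {@code SubscriptionProperties} setters:</p>
 * <pre>{@code
 * client.getSubscription("my-topic", "my-subscription")
 *     .flatMap(subscription -> {
 *         subscription.setMaxDeliveryCount(20); // assumed setter; change only what should differ from the current values
 *         return client.updateSubscriptionWithResponse(subscription);
 *     })
 *     .subscribe(response -> System.out.println("Update returned status: " + response.getStatusCode()));
 * }</pre>
 *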
* * @return A Mono that returns the updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse( SubscriptionProperties subscription) { return withContext(context -> updateSubscriptionWithResponse(subscription, context)); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> updateTopic(TopicProperties topic) { return updateTopicWithResponse(topic).map(Response::getValue); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return A Mono that completes with the updated topic and its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) { return withContext(context -> updateTopicWithResponse(topic, context)); } /** * Creates a queue with its context. * * @param createQueueOptions Queue to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. */ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null.")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(logger, new NullPointerException("'createQueueOptions' cannot be null.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = createQueueOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); createQueueOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = createQueueOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); createQueueOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription description = EntityHelper.getQueueDescription(createQueueOptions); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(description); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (ruleName == null) { return monoError(logger, new NullPointerException("'ruleName' cannot be null.")); } else if (ruleName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'ruleName' cannot be empty.")); } if (ruleOptions == null) { return monoError(logger, new NullPointerException("'rule' cannot be null.")); } final RuleActionImpl action = ruleOptions.getAction() != null ? EntityHelper.toImplementation(ruleOptions.getAction()) : null; final RuleFilterImpl filter = ruleOptions.getFilter() != null ? EntityHelper.toImplementation(ruleOptions.getFilter()) : null; final RuleDescription rule = new RuleDescription() .setAction(action) .setFilter(filter) .setName(ruleName); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(rule); final CreateRuleBody createEntity = new CreateRuleBody().setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be empty.")); } if (subscriptionOptions == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscriptionOptions.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscriptionOptions.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscriptionOptions.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscriptionOptions.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final SubscriptionDescription subscription = EntityHelper.getSubscriptionDescription(subscriptionOptions); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(subscription); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody().setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
     */
    Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions,
        Context context) {
        if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null."));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty."));
        }
        if (topicOptions == null) {
            return monoError(logger, new NullPointerException("'topicOptions' cannot be null"));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final TopicDescription topic = EntityHelper.getTopicDescription(topicOptions);
        final CreateTopicBodyContent content = new CreateTopicBodyContent()
            .setType(CONTENT_TYPE)
            .setTopicDescription(topic);
        final CreateTopicBody createEntity = new CreateTopicBody()
            .setContent(content);
        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return entityClient.putWithResponseAsync(topicName, createEntity, null, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(this::deserializeTopic);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a queue with its context.
     *
     * @param queueName Name of queue to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the queue is deleted.
     */
    Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) {
        if (queueName == null) {
            return monoError(logger, new NullPointerException("'queueName' cannot be null"));
        } else if (queueName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'queueName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return entityClient.deleteWithResponseAsync(queueName, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a rule with its context.
     *
     * @param topicName Name of the topic associated with the rule to delete.
     * @param subscriptionName Name of the subscription for the rule.
     * @param ruleName Name of the rule.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the rule is deleted.
     */
    Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName,
        Context context) {
        if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null"));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string."));
        } else if (subscriptionName == null) {
            return monoError(logger, new NullPointerException("'subscriptionName' cannot be null"));
        } else if (subscriptionName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string."));
        } else if (ruleName == null) {
            return monoError(logger, new NullPointerException("'ruleName' cannot be null"));
        } else if (ruleName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'ruleName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a subscription with its context.
     *
     * @param topicName Name of topic associated with subscription to delete.
     * @param subscriptionName Name of subscription to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the subscription is deleted.
     */
    Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) {
        if (subscriptionName == null) {
            return monoError(logger, new NullPointerException("'subscriptionName' cannot be null"));
        } else if (subscriptionName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string."));
        } else if (topicName == null) {
            return monoError(logger, new NullPointerException("'topicName' cannot be null"));
        } else if (topicName.isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string."));
        } else if (context == null) {
            return monoError(logger, new NullPointerException("'context' cannot be null."));
        }

        final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE);

        try {
            return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName,
                withTracing)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes a topic with its context.
     *
     * @param topicName Name of topic to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the topic is deleted.
*/ Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.deleteWithResponseAsync(topicName, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets whether an entity exists. * * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is * thrown, then it is mapped to false. * @param <T> Entity type. * * @return True if the entity exists, false otherwise. */ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. 
*/ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (queueName == null) { return monoError(logger, new NullPointerException("'queueName' cannot be null")); } else if (queueName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'queueName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(queueName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. 
*/ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be an empty string.")); } else if (subscriptionName == null) { return monoError(logger, new NullPointerException("'subscriptionName' cannot be null.")); } else if (subscriptionName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'subscriptionName' cannot be an empty string.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. * * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. 
*/ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (topicName == null) { return monoError(logger, new NullPointerException("'topicName' cannot be null")); } else if (topicName.isEmpty()) { return monoError(logger, new IllegalArgumentException("'topicName' cannot be empty.")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.getWithResponseAsync(topicName, true, withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listQueues(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listQueues(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listRules(topicName, subscriptionName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listSubscriptions(topicName, 0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return listTopics(0, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Gets the next page of topics with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); final int skip = Integer.parseInt(continuationToken); return listTopics(skip, withTracing); } catch (RuntimeException e) { return monoError(logger, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. 
*/ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(logger, new NullPointerException("'queue' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = queue.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); queue.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = queue.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); queue.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final QueueDescription queueDescription = EntityHelper.toImplementation(queue); final CreateQueueBodyContent content = new CreateQueueBodyContent() .setType(CONTENT_TYPE) .setQueueDescription(queueDescription); final CreateQueueBody createEntity = new CreateQueueBody() .setContent(content); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeQueue(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(logger, new NullPointerException("'rule' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final RuleDescription implementation = EntityHelper.toImplementation(rule); final CreateRuleBodyContent content = new CreateRuleBodyContent() .setType(CONTENT_TYPE) .setRuleDescription(implementation); final CreateRuleBody ruleBody = new CreateRuleBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeRule(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. 
*/ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(logger, new NullPointerException("'subscription' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardToEntity = subscription.getForwardTo(); if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); subscription.setForwardTo(getAbsoluteUrlFromEntity(forwardToEntity)); } final String forwardDlqToEntity = subscription.getForwardDeadLetteredMessagesTo(); if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); subscription.setForwardDeadLetteredMessagesTo(getAbsoluteUrlFromEntity(forwardDlqToEntity)); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final SubscriptionDescription implementation = EntityHelper.toImplementation(subscription); final CreateSubscriptionBodyContent content = new CreateSubscriptionBodyContent() .setType(CONTENT_TYPE) .setSubscriptionDescription(implementation); final CreateSubscriptionBody createEntity = new CreateSubscriptionBody() .setContent(content); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(logger, new NullPointerException("'topic' cannot be null")); } else if (context == null) { return monoError(logger, new NullPointerException("'context' cannot be null.")); } final TopicDescription implementation = EntityHelper.toImplementation(topic); final CreateTopicBodyContent content = new CreateTopicBodyContent() .setType(CONTENT_TYPE) .setTopicDescription(implementation); final CreateTopicBody createEntity = new CreateTopicBody() .setContent(content); final Context withTracing = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", withTracing) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeTopic(response)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. 
{}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { logger.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final RuleDescription description = entry.getContent().getRuleDescription(); final RuleProperties result = EntityHelper.toModel(description); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } final SubscriptionProperties subscription = EntityHelper.toModel( entry.getContent().getSubscriptionDescription()); final String subscriptionName = getTitleValue(entry.getTitle()); EntityHelper.setSubscriptionName(subscription, subscriptionName); EntityHelper.setTopicName(subscription, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), subscription); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { logger.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { logger.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Creates a {@link FeedPage} given the elements and a set of response links to get the next link from. * * @param entities Entities in the feed. * @param responseLinks Links returned from the feed. * @param <TResult> Type of Service Bus entities in page. * * @return A {@link FeedPage} indicating whether this can be continued or not. * @throws MalformedURLException if the "next" page link does not contain a well-formed URL. */ private <TResult, TFeed> FeedPage<TResult> extractPage(Response<TFeed> response, List<TResult> entities, List<ResponseLink> responseLinks) throws MalformedURLException, UnsupportedEncodingException { final Optional<ResponseLink> nextLink = responseLinks.stream() .filter(link -> link.getRel().equalsIgnoreCase("next")) .findFirst(); if (!nextLink.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } final URL url = new URL(nextLink.get().getHref()); final String decode = URLDecoder.decode(url.getQuery(), StandardCharsets.UTF_8.name()); final Optional<Integer> skipParameter = Arrays.stream(decode.split("&amp;|&")) .map(part -> part.split("=", 2)) .filter(parts -> parts[0].equalsIgnoreCase("$skip") && parts.length == 2) .map(parts -> Integer.valueOf(parts[1])) .findFirst(); if (skipParameter.isPresent()) { return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities, skipParameter.get()); } else { logger.warning("There should have been a skip parameter for the next page."); return new FeedPage<>(response.getStatusCode(), response.getHeaders(), response.getRequest(), entities); } } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. 
*/ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getQueueDescription() != null) .map(e -> { final String queueName = getTitleValue(e.getTitle()); final QueueProperties queueProperties = EntityHelper.toModel( e.getContent().getQueueDescription()); EntityHelper.setQueueName(queueProperties, queueName); return queueProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getRuleDescription() != null) .map(e -> { return EntityHelper.toModel(e.getContent().getRuleDescription()); }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. */ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize SubscriptionDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getSubscriptionDescription() != null) .map(e -> { final String subscriptionName = getTitleValue(e.getTitle()); final SubscriptionProperties description = EntityHelper.toModel( e.getContent().getSubscriptionDescription()); EntityHelper.setTopicName(description, topicName); EntityHelper.setSubscriptionName(description, subscriptionName); return description; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { logger.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = feed.getEntry().stream() .filter(e -> e.getContent() != null && e.getContent().getTopicDescription() != null) .map(e -> { final String topicName = getTitleValue(e.getTitle()); final TopicProperties topicProperties = EntityHelper.toModel( e.getContent().getTopicDescription()); EntityHelper.setTopicName(topicProperties, topicName); return topicProperties; }) .collect(Collectors.toList()); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Check that the additional headers field is present and add the additional auth header * * @param headerName name of the header to be added * @param context current request context * * @return boolean representing the outcome of adding header operation */ private void addSupplementaryAuthHeader(String headerName, String entity, Context context) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY) .ifPresent(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; customHttpHeaders.add(headerName, entity); } }); } /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. * * @return Forward to Entity represented as an absolute URL */ /** * Given an XML title element, returns the XML text inside. Jackson deserializes Objects as LinkedHashMaps. XML text * is represented as an entry with an empty string as the key. * * For example, the text returned from this {@code <title text="text/xml">QueueName</title>} is "QueueName". 
* * @param responseTitle XML title element. * * @return The XML text inside the title. {@code null} is returned if there is no value. */ @SuppressWarnings("unchecked") private String getTitleValue(Object responseTitle) { if (!(responseTitle instanceof Map)) { return null; } final Map<String, String> map; try { map = (Map<String, String>) responseTitle; return map.get(""); } catch (ClassCastException error) { logger.warning("Unable to cast to Map<String,String>. Title: {}", responseTitle, error); return null; } } /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } /** * A page of Service Bus entities. * * @param <T> The entity description from Service Bus. */ private static final class FeedPage<T> implements PagedResponse<T> { private final int statusCode; private final HttpHeaders header; private final HttpRequest request; private final IterableStream<T> entries; private final String continuationToken; /** * Creates a page that does not have any more pages. * * @param entries Items in the page. */ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = null; } /** * Creates an instance that has additional pages to fetch. * * @param entries Items in the page. * @param skip Number of elements to "skip". 
*/ private FeedPage(int statusCode, HttpHeaders header, HttpRequest request, List<T> entries, int skip) { this.statusCode = statusCode; this.header = header; this.request = request; this.entries = new IterableStream<>(entries); this.continuationToken = String.valueOf(skip); } @Override public IterableStream<T> getElements() { return entries; } @Override public String getContinuationToken() { return continuationToken; } @Override public int getStatusCode() { return statusCode; } @Override public HttpHeaders getHeaders() { return header; } @Override public HttpRequest getRequest() { return request; } @Override public void close() { } } private static final class EntityNotFoundHttpResponse<T> extends HttpResponse { private final int statusCode; private final HttpHeaders headers; private EntityNotFoundHttpResponse(Response<T> response) { super(response.getRequest()); this.headers = response.getHeaders(); this.statusCode = response.getStatusCode(); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String name) { return headers.getValue(name); } @Override public HttpHeaders getHeaders() { return headers; } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } } }
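A side note on the FeedPage type above: its continuation token is simply the number of entries already consumed, so fetching the next page amounts to parsing the token back into a skip count (as listQueuesNextPage and friends do with Integer.parseInt). Below is a minimal, self-contained sketch of that pattern; the Page and InMemoryPager names, the page size, and the sample data are illustrative assumptions, not part of the client above.

import java.util.Arrays;
import java.util.List;

// Illustrative stand-ins; the real client hands back PagedResponse<T> instances.
final class Page<T> {
    final List<T> items;
    final String continuationToken; // null when there is no further page

    Page(List<T> items, String continuationToken) {
        this.items = items;
        this.continuationToken = continuationToken;
    }
}

public final class InMemoryPager {
    private static final int PAGE_SIZE = 2;

    // Parse the skip count out of the token, mirroring listQueuesNextPage above.
    static Page<String> listPage(List<String> all, String continuationToken) {
        final int skip = (continuationToken == null || continuationToken.isEmpty())
            ? 0
            : Integer.parseInt(continuationToken);
        final int end = Math.min(skip + PAGE_SIZE, all.size());
        final List<String> items = all.subList(skip, end);
        // Only hand back a token when more entries remain, like FeedPage's two constructors.
        final String next = end < all.size() ? String.valueOf(end) : null;
        return new Page<>(items, next);
    }

    public static void main(String[] args) {
        final List<String> queues = Arrays.asList("q-0", "q-1", "q-2", "q-3", "q-4");
        String token = null;
        do {
            final Page<String> page = listPage(queues, token);
            System.out.println(page.items);
            token = page.continuationToken;
        } while (token != null);
    }
}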
@conniey should these services be moved to a team-wide resource (not something with conniey)?
void deserializeSubscriptionDescriptionFeed() throws IOException { final String contents = getContents("SubscriptionDescriptionFeed.xml"); final List<ResponseLink> responseLinks = Collections.singletonList( new ResponseLink().setRel("self") .setHref("https: ); final String topicName = "topic"; final String subscriptionName1 = "subscription-0"; final String subscriptionName2 = "subscription-session-0"; final String subscriptionName3 = "subscription-session-1"; final SubscriptionDescription subscription1 = EntityHelper.getSubscriptionDescription( new CreateSubscriptionOptions() .setLockDuration(Duration.ofSeconds(15)) .setDefaultMessageTimeToLive(Duration.ofMinutes(5)) .setMaxDeliveryCount(5) .setAutoDeleteOnIdle(Duration.ofDays(1))); final SubscriptionDescription subscription2 = EntityHelper.getSubscriptionDescription( new CreateSubscriptionOptions() .setSessionRequired(true) .setLockDuration(Duration.ofSeconds(15)) .setMaxDeliveryCount(5)); final SubscriptionDescription subscription3 = EntityHelper.getSubscriptionDescription( new CreateSubscriptionOptions() .setSessionRequired(true) .setLockDuration(Duration.ofSeconds(15)) .setMaxDeliveryCount(5)); final List<SubscriptionDescription> expectedDescriptions = Arrays.asList( subscription1, subscription2, subscription3); final SubscriptionDescriptionEntry entry1 = new SubscriptionDescriptionEntry() .setId("https: .setTitle(getResponseTitle(subscriptionName1)) .setPublished(OffsetDateTime.parse("2020-06-22T23:47:53Z")) .setUpdated(OffsetDateTime.parse("2020-06-23T23:47:53Z")) .setLink(new ResponseLink().setRel("self").setHref("Subscriptions/subscription-0?api-version=2021-05")) .setContent(new SubscriptionDescriptionEntryContent() .setType("application/xml") .setSubscriptionDescription(subscription1)); final SubscriptionDescriptionEntry entry2 = new SubscriptionDescriptionEntry() .setId("https: .setTitle(getResponseTitle(subscriptionName2)) .setPublished(OffsetDateTime.parse("2020-06-22T23:47:53Z")) .setUpdated(OffsetDateTime.parse("2020-05-22T23:47:53Z")) .setLink(new ResponseLink().setRel("self").setHref("Subscriptions/subscription-session-0?api-version=2021-05")) .setContent(new SubscriptionDescriptionEntryContent() .setType("application/xml") .setSubscriptionDescription(subscription2)); final SubscriptionDescriptionEntry entry3 = new SubscriptionDescriptionEntry() .setId("https: .setTitle(getResponseTitle(subscriptionName3)) .setPublished(OffsetDateTime.parse("2020-06-22T23:47:54Z")) .setUpdated(OffsetDateTime.parse("2020-04-22T23:47:54Z")) .setLink(new ResponseLink().setRel("self").setHref("Subscriptions/subscription-session-1?api-version=2021-05")) .setContent(new SubscriptionDescriptionEntryContent() .setType("application/xml") .setSubscriptionDescription(subscription3)); final Map<String, String> titleMap = new HashMap<>(); titleMap.put("", "Subscriptions"); titleMap.put("type", "text"); final List<SubscriptionDescriptionEntry> entries = Arrays.asList(entry1, entry2, entry3); final SubscriptionDescriptionFeed expected = new SubscriptionDescriptionFeed() .setId("feed-id") .setTitle(titleMap) .setUpdated(OffsetDateTime.parse("2020-06-30T11:41:32Z")) .setLink(responseLinks) .setEntry(entries); final int expectedNumberOfEntries = 11; final SubscriptionDescriptionFeed actual = serializer.deserialize(contents, SubscriptionDescriptionFeed.class); assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getTitle(), actual.getTitle()); assertEquals(expected.getUpdated(), actual.getUpdated()); assertNotNull(actual.getLink()); 
assertEquals(expected.getLink().size(), actual.getLink().size()); for (int i = 0; i < expected.getLink().size(); i++) { final ResponseLink expectedLink = expected.getLink().get(i); final ResponseLink actualLink = actual.getLink().get(i); assertEquals(expectedLink.getRel(), actualLink.getRel()); assertEquals(expectedLink.getHref(), actualLink.getHref()); } assertNotNull(actual.getEntry()); assertTrue(expected.getEntry().size() < actual.getEntry().size()); assertEquals(expectedNumberOfEntries, actual.getEntry().size()); for (int i = 0; i < expected.getEntry().size(); i++) { final SubscriptionDescriptionEntry expectedEntry = expected.getEntry().get(i); final SubscriptionDescriptionEntry actualEntry = actual.getEntry().get(i); assertEquals(expected.getId(), actual.getId()); assertNotNull(actual.getTitle()); assertResponseTitle(expectedEntry.getTitle(), actualEntry.getTitle()); assertEquals(expectedEntry.getUpdated(), actualEntry.getUpdated()); assertEquals(expectedEntry.getPublished(), actualEntry.getPublished()); final SubscriptionDescription expectedSubscription = expectedDescriptions.get(i); assertSubscriptionEquals(expectedSubscription, EntityStatus.ACTIVE, actualEntry.getContent().getSubscriptionDescription()); } }
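For context on the title assertions in the test above: Jackson deserializes the XML <title> element into a Map where the element text sits under the empty-string key and attributes such as "type" sit under their own keys, which is what the client's getTitleValue helper reads back. A rough, self-contained sketch under those assumptions (TitleValueExample is an assumed name):

import java.util.HashMap;
import java.util.Map;

public final class TitleValueExample {
    @SuppressWarnings("unchecked")
    static String getTitleValue(Object responseTitle) {
        if (!(responseTitle instanceof Map)) {
            return null;
        }
        final Map<String, String> map = (Map<String, String>) responseTitle;
        return map.get(""); // the element text lives under the empty-string key
    }

    public static void main(String[] args) {
        final Map<String, String> title = new HashMap<>();
        title.put("", "subscription-0"); // XML text content
        title.put("type", "text");       // XML attribute
        System.out.println(getTitleValue(title)); // prints: subscription-0
    }
}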
.setId("https:
void deserializeSubscriptionDescriptionFeed() throws IOException { final String contents = getContents("SubscriptionDescriptionFeed.xml"); final List<ResponseLink> responseLinks = Collections.singletonList( new ResponseLink().setRel("self") .setHref("https: ); final String topicName = "topic"; final String subscriptionName1 = "subscription-0"; final String subscriptionName2 = "subscription-session-0"; final String subscriptionName3 = "subscription-session-1"; final SubscriptionDescription subscription1 = EntityHelper.getSubscriptionDescription( new CreateSubscriptionOptions() .setLockDuration(Duration.ofSeconds(15)) .setDefaultMessageTimeToLive(Duration.ofMinutes(5)) .setMaxDeliveryCount(5) .setAutoDeleteOnIdle(Duration.ofDays(1))); final SubscriptionDescription subscription2 = EntityHelper.getSubscriptionDescription( new CreateSubscriptionOptions() .setSessionRequired(true) .setLockDuration(Duration.ofSeconds(15)) .setMaxDeliveryCount(5)); final SubscriptionDescription subscription3 = EntityHelper.getSubscriptionDescription( new CreateSubscriptionOptions() .setSessionRequired(true) .setLockDuration(Duration.ofSeconds(15)) .setMaxDeliveryCount(5)); final List<SubscriptionDescription> expectedDescriptions = Arrays.asList( subscription1, subscription2, subscription3); final SubscriptionDescriptionEntry entry1 = new SubscriptionDescriptionEntry() .setId("https: .setTitle(getResponseTitle(subscriptionName1)) .setPublished(OffsetDateTime.parse("2020-06-22T23:47:53Z")) .setUpdated(OffsetDateTime.parse("2020-06-23T23:47:53Z")) .setLink(new ResponseLink().setRel("self").setHref("Subscriptions/subscription-0?api-version=2021-05")) .setContent(new SubscriptionDescriptionEntryContent() .setType("application/xml") .setSubscriptionDescription(subscription1)); final SubscriptionDescriptionEntry entry2 = new SubscriptionDescriptionEntry() .setId("https: .setTitle(getResponseTitle(subscriptionName2)) .setPublished(OffsetDateTime.parse("2020-06-22T23:47:53Z")) .setUpdated(OffsetDateTime.parse("2020-05-22T23:47:53Z")) .setLink(new ResponseLink().setRel("self").setHref("Subscriptions/subscription-session-0?api-version=2021-05")) .setContent(new SubscriptionDescriptionEntryContent() .setType("application/xml") .setSubscriptionDescription(subscription2)); final SubscriptionDescriptionEntry entry3 = new SubscriptionDescriptionEntry() .setId("https: .setTitle(getResponseTitle(subscriptionName3)) .setPublished(OffsetDateTime.parse("2020-06-22T23:47:54Z")) .setUpdated(OffsetDateTime.parse("2020-04-22T23:47:54Z")) .setLink(new ResponseLink().setRel("self").setHref("Subscriptions/subscription-session-1?api-version=2021-05")) .setContent(new SubscriptionDescriptionEntryContent() .setType("application/xml") .setSubscriptionDescription(subscription3)); final Map<String, String> titleMap = new HashMap<>(); titleMap.put("", "Subscriptions"); titleMap.put("type", "text"); final List<SubscriptionDescriptionEntry> entries = Arrays.asList(entry1, entry2, entry3); final SubscriptionDescriptionFeed expected = new SubscriptionDescriptionFeed() .setId("feed-id") .setTitle(titleMap) .setUpdated(OffsetDateTime.parse("2020-06-30T11:41:32Z")) .setLink(responseLinks) .setEntry(entries); final int expectedNumberOfEntries = 11; final SubscriptionDescriptionFeed actual = serializer.deserialize(contents, SubscriptionDescriptionFeed.class); assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getTitle(), actual.getTitle()); assertEquals(expected.getUpdated(), actual.getUpdated()); assertNotNull(actual.getLink()); 
assertEquals(expected.getLink().size(), actual.getLink().size()); for (int i = 0; i < expected.getLink().size(); i++) { final ResponseLink expectedLink = expected.getLink().get(i); final ResponseLink actualLink = actual.getLink().get(i); assertEquals(expectedLink.getRel(), actualLink.getRel()); assertEquals(expectedLink.getHref(), actualLink.getHref()); } assertNotNull(actual.getEntry()); assertTrue(expected.getEntry().size() < actual.getEntry().size()); assertEquals(expectedNumberOfEntries, actual.getEntry().size()); for (int i = 0; i < expected.getEntry().size(); i++) { final SubscriptionDescriptionEntry expectedEntry = expected.getEntry().get(i); final SubscriptionDescriptionEntry actualEntry = actual.getEntry().get(i); assertEquals(expected.getId(), actual.getId()); assertNotNull(actual.getTitle()); assertResponseTitle(expectedEntry.getTitle(), actualEntry.getTitle()); assertEquals(expectedEntry.getUpdated(), actualEntry.getUpdated()); assertEquals(expectedEntry.getPublished(), actualEntry.getPublished()); final SubscriptionDescription expectedSubscription = expectedDescriptions.get(i); assertSubscriptionEquals(expectedSubscription, EntityStatus.ACTIVE, actualEntry.getContent().getSubscriptionDescription()); } }
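One more aside, on the mapException helper shown earlier in this section: it translates a management-error status code into a specific exception type and falls back to a generic one. The sketch below mirrors that shape with plain JDK exceptions as stand-ins (the real code uses ClientAuthenticationException, ResourceNotFoundException, ResourceExistsException, ResourceModifiedException and HttpResponseException); StatusCodeMappingExample is an assumed name, not part of the library.

public final class StatusCodeMappingExample {
    // Map a status code to a throwable, falling through to a generic exception, in the
    // same spirit as mapException's switch; the JDK types here are only placeholders.
    static RuntimeException map(int statusCode, String detail) {
        switch (statusCode) {
            case 401: return new SecurityException("Authentication failed: " + detail);
            case 404: return new IllegalStateException("Not found: " + detail);
            case 409: return new IllegalStateException("Already exists: " + detail);
            case 412: return new IllegalStateException("Precondition failed: " + detail);
            default:  return new RuntimeException("HTTP " + statusCode + ": " + detail);
        }
    }

    public static void main(String[] args) {
        System.out.println(map(404, "Queue 'q-0' does not exist."));
        System.out.println(map(500, "Internal error."));
    }
}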
class ServiceBusManagementSerializerTest { private static final String TITLE_KEY = ""; private final ServiceBusManagementSerializer serializer = new ServiceBusManagementSerializer(); /** * Verify we can deserialize XML request when creating a queue. */ @Test void deserializeCreateQueueDescription() throws IOException { final String contents = getContents("CreateQueueEntry.xml"); final AuthorizationRule rule = new SharedAccessAuthorizationRule("test-name", "fakePrimaryKey", "fakeSecondaryKey", Collections.singletonList(AccessRights.SEND)); final CreateQueueOptions expected = new CreateQueueOptions() .setAutoDeleteOnIdle(null) .setDefaultMessageTimeToLive(null) .setDuplicateDetectionHistoryTimeWindow(null) .setLockDuration(Duration.ofMinutes(10)) .setMaxSizeInMegabytes(1028) .setDuplicateDetectionRequired(false) .setSessionRequired(true) .setDeadLetteringOnMessageExpiration(false) .setMaxDeliveryCount(5) .setBatchedOperationsEnabled(true) .setPartitioningEnabled(false); expected.getAuthorizationRules().add(rule); final QueueDescriptionEntry entry = serializer.deserialize(contents, QueueDescriptionEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); final QueueDescription actual = entry.getContent().getQueueDescription(); assertQueueEquals(expected, EntityStatus.ACTIVE, actual); final List<AuthorizationRule> actualRules = actual.getAuthorizationRules().stream() .map(TestAuthorizationRule::new) .collect(Collectors.toList()); TestUtils.assertAuthorizationRules(expected.getAuthorizationRules(), actualRules); } /** * Verify we can deserialize XML from a GET queue request. */ @Test void deserializeQueueDescription() throws IOException { final String contents = getContents("QueueDescriptionEntry.xml"); final String queueName = "my-test-queue"; final CreateQueueOptions expected = new CreateQueueOptions() .setLockDuration(Duration.ofMinutes(5)) .setMaxSizeInMegabytes(1024) .setDuplicateDetectionRequired(true) .setSessionRequired(true) .setDefaultMessageTimeToLive(Duration.parse("PT3H20M10S")) .setDeadLetteringOnMessageExpiration(false) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(10)) .setMaxDeliveryCount(10) .setBatchedOperationsEnabled(true) .setAutoDeleteOnIdle(Duration.ofHours(5)) .setPartitioningEnabled(true); final QueueDescriptionEntry entry = serializer.deserialize(contents, QueueDescriptionEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); assertTitle(queueName, entry.getTitle()); final QueueDescription actual = entry.getContent().getQueueDescription(); assertQueueEquals(expected, EntityStatus.DELETING, actual); } /** * Verify we can deserialize XML from a GET queue request and create convenience model, {@link QueueRuntimeProperties}. 
*/ @Test void deserializeQueueRuntimeProperties() throws IOException { final String contents = getContents("QueueDescriptionEntry.xml"); final OffsetDateTime createdAt = OffsetDateTime.parse("2020-06-05T03:55:07.5Z"); final OffsetDateTime updatedAt = OffsetDateTime.parse("2020-06-05T03:45:07.64Z"); final OffsetDateTime accessedAt = OffsetDateTime.parse("0001-01-01T00:00:00Z"); final int sizeInBytes = 2048; final int messageCount = 23; final MessageCountDetails expectedCount = new MessageCountDetails() .setActiveMessageCount(5) .setDeadLetterMessageCount(3) .setScheduledMessageCount(65) .setTransferMessageCount(10) .setTransferDeadLetterMessageCount(123); final QueueDescriptionEntry entry = serializer.deserialize(contents, QueueDescriptionEntry.class); final QueueProperties properties = EntityHelper.toModel(entry.getContent().getQueueDescription()); final QueueRuntimeProperties actual = new QueueRuntimeProperties(properties); assertEquals(sizeInBytes, actual.getSizeInBytes()); assertEquals(messageCount, actual.getTotalMessageCount()); assertEquals(createdAt, actual.getCreatedAt()); assertEquals(updatedAt, actual.getUpdatedAt()); assertEquals(accessedAt, actual.getAccessedAt()); assertEquals(expectedCount.getActiveMessageCount(), actual.getActiveMessageCount()); assertEquals(expectedCount.getDeadLetterMessageCount(), actual.getDeadLetterMessageCount()); assertEquals(expectedCount.getScheduledMessageCount(), actual.getScheduledMessageCount()); assertEquals(expectedCount.getTransferMessageCount(), actual.getTransferMessageCount()); assertEquals(expectedCount.getTransferDeadLetterMessageCount(), actual.getTransferDeadLetterMessageCount()); } /** * Verify we can deserialize feed XML from a list queues operation that has a paged response. */ @Test void deserializeQueueDescriptionFeedPaged() throws IOException { final String contents = getContents("QueueDescriptionFeed-Paged.xml"); final List<ResponseLink> responseLinks = Arrays.asList( new ResponseLink().setRel("self") .setHref("https: new ResponseLink().setRel("next") .setHref("https: ); final String queueName = "q-0"; final CreateQueueOptions options = new CreateQueueOptions() .setLockDuration(Duration.ofMinutes(10)) .setMaxSizeInMegabytes(102) .setDuplicateDetectionRequired(true) .setSessionRequired(true) .setDefaultMessageTimeToLive(Duration.ofSeconds(10)) .setDeadLetteringOnMessageExpiration(false) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(10)) .setMaxDeliveryCount(10) .setBatchedOperationsEnabled(true) .setAutoDeleteOnIdle(Duration.ofSeconds(5)) .setPartitioningEnabled(true); final QueueDescription queueProperties = EntityHelper.getQueueDescription(options); final QueueDescriptionEntry entry1 = new QueueDescriptionEntry() .setBase("https: .setId("https: .setTitle(getResponseTitle("q-0")) .setPublished(OffsetDateTime.parse("2020-03-05T07:17:04Z")) .setUpdated(OffsetDateTime.parse("2020-01-05T07:17:04Z")) .setAuthor(new ResponseAuthor().setName("sb-java")) .setLink(new ResponseLink().setRel("self").setHref("../q-0?api-version=2021-05")) .setContent(new QueueDescriptionEntryContent().setType("application/xml") .setQueueDescription(queueProperties)); final QueueDescriptionEntry entry2 = new QueueDescriptionEntry() .setBase("https: .setId("https: .setTitle(getResponseTitle("q-1")) .setPublished(OffsetDateTime.parse("2020-06-10T07:16:25Z")) .setUpdated(OffsetDateTime.parse("2020-06-15T07:16:25Z")) .setAuthor(new ResponseAuthor().setName("sb-java2")) .setLink(new ResponseLink().setRel("self").setHref("../q-1?api-version=2021-05")) 
.setContent(new QueueDescriptionEntryContent().setType("application/xml") .setQueueDescription(queueProperties)); final QueueDescriptionEntry entry3 = new QueueDescriptionEntry() .setBase("https: .setId("https: .setTitle(getResponseTitle("q-2")) .setPublished(OffsetDateTime.parse("2020-06-05T07:17:06Z")) .setUpdated(OffsetDateTime.parse("2020-06-05T07:17:06Z")) .setAuthor(new ResponseAuthor().setName("sb-java3")) .setLink(new ResponseLink().setRel("self").setHref("../q-2?api-version=2021-05")) .setContent(new QueueDescriptionEntryContent().setType("application/xml") .setQueueDescription(queueProperties)); final Map<String, String> titleMap = new HashMap<>(); titleMap.put("", "Queues"); titleMap.put("type", "text"); final List<QueueDescriptionEntry> entries = Arrays.asList(entry1, entry2, entry3); final QueueDescriptionFeed expected = new QueueDescriptionFeed() .setId("feed-id") .setTitle(titleMap) .setUpdated(OffsetDateTime.parse("2020-12-05T07:17:21Z")) .setLink(responseLinks) .setEntry(entries); final QueueDescriptionFeed actual = serializer.deserialize(contents, QueueDescriptionFeed.class); assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getTitle(), actual.getTitle()); assertEquals(expected.getUpdated(), actual.getUpdated()); assertNotNull(actual.getLink()); assertEquals(expected.getLink().size(), actual.getLink().size()); for (int i = 0; i < expected.getLink().size(); i++) { final ResponseLink expectedLink = expected.getLink().get(i); final ResponseLink actualLink = actual.getLink().get(i); assertEquals(expectedLink.getRel(), actualLink.getRel()); assertEquals(expectedLink.getHref(), actualLink.getHref()); } assertNotNull(actual.getEntry()); assertEquals(expected.getEntry().size(), actual.getEntry().size()); for (int i = 0; i < expected.getEntry().size(); i++) { final QueueDescriptionEntry expectedEntry = expected.getEntry().get(i); final QueueDescriptionEntry actualEntry = actual.getEntry().get(i); assertEquals(expected.getId(), actual.getId()); assertNotNull(actual.getTitle()); assertResponseTitle(expectedEntry.getTitle(), actualEntry.getTitle()); assertEquals(expectedEntry.getUpdated(), actualEntry.getUpdated()); assertEquals(expectedEntry.getPublished(), actualEntry.getPublished()); assertEquals(expectedEntry.getAuthor().getName(), actualEntry.getAuthor().getName()); assertQueueEquals(options, EntityStatus.ACTIVE, actualEntry.getContent().getQueueDescription()); } } /** * Verify we can deserialize XML from a GET namespace request. 
*/ @Test void deserializeNamespace() throws IOException { final String contents = getContents("NamespaceEntry.xml"); final String name = "ShivangiServiceBus"; final String alias = "MyServiceBusFallback"; final OffsetDateTime createdTime = OffsetDateTime.parse("2020-04-09T08:38:55.807Z"); final OffsetDateTime modifiedTime = OffsetDateTime.parse("2020-06-12T06:34:38.383Z"); final MessagingSku sku = MessagingSku.PREMIUM; final NamespaceType namespaceType = NamespaceType.MESSAGING; final NamespacePropertiesEntry entry = serializer.deserialize(contents, NamespacePropertiesEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); assertTitle(name, entry.getTitle()); final NamespaceProperties actual = entry.getContent().getNamespaceProperties(); assertEquals(name, actual.getName()); assertEquals(alias, actual.getAlias()); assertEquals(createdTime, actual.getCreatedTime()); assertEquals(modifiedTime, actual.getModifiedTime()); assertEquals(sku, actual.getMessagingSku()); assertEquals(namespaceType, actual.getNamespaceType()); } /** * Verify we can deserialize XML from a GET subscription request. */ @Test void deserializeSubscription() throws IOException { final String contents = getContents("SubscriptionDescriptionEntry.xml"); final SubscriptionDescription expected = new SubscriptionDescription() .setLockDuration(Duration.ofSeconds(15)) .setRequiresSession(true) .setDefaultMessageTimeToLive(ServiceBusConstants.MAX_DURATION) .setDeadLetteringOnMessageExpiration(false) .setDeadLetteringOnFilterEvaluationExceptions(true) .setEnableBatchedOperations(true) .setMaxDeliveryCount(5) .setAutoDeleteOnIdle(Duration.ofHours(1).plusMinutes(48)); final SubscriptionDescriptionEntry entry = serializer.deserialize(contents, SubscriptionDescriptionEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); final SubscriptionDescription actual = entry.getContent().getSubscriptionDescription(); assertSubscriptionEquals(expected, EntityStatus.ACTIVE, actual); } /** * Verify we can deserialize XML from a PUT subscription request. */ @Test void deserializeCreateSubscription() throws IOException { final String contents = getContents("CreateSubscriptionEntry.xml"); final String topicName = "topic"; final String subscriptionName = "sub46850f"; final SubscriptionDescription expected = EntityHelper.getSubscriptionDescription( new CreateSubscriptionOptions() .setAutoDeleteOnIdle(Duration.parse("P10675199DT2H48M5.477S")) .setDefaultMessageTimeToLive(Duration.parse("P10675199DT2H48M5.477S")) .setSessionRequired(false) .setLockDuration(Duration.ofSeconds(45)) .setMaxDeliveryCount(7)); final SubscriptionDescriptionEntry entry = serializer.deserialize(contents, SubscriptionDescriptionEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); final SubscriptionDescription actual = entry.getContent().getSubscriptionDescription(); assertSubscriptionEquals(expected, EntityStatus.ACTIVE, actual); } /** * Verify we can deserialize XML from a GET subscription request and create convenience model, {@link * SubscriptionRuntimeProperties}. 
*/ @Test void deserializeSubscriptionRuntimeProperties() throws IOException { final String contents = getContents("SubscriptionDescriptionEntry.xml"); final OffsetDateTime createdAt = OffsetDateTime.parse("2020-06-22T23:47:54.0131447Z"); final OffsetDateTime updatedAt = OffsetDateTime.parse("2020-06-22T23:47:20.0131447Z"); final OffsetDateTime accessedAt = OffsetDateTime.parse("2020-06-22T23:47:54.013Z"); final int messageCount = 13; final MessageCountDetails expectedCount = new MessageCountDetails() .setActiveMessageCount(10) .setDeadLetterMessageCount(50) .setScheduledMessageCount(34) .setTransferMessageCount(11) .setTransferDeadLetterMessageCount(2); final SubscriptionDescriptionEntry entry = serializer.deserialize(contents, SubscriptionDescriptionEntry.class); final SubscriptionRuntimeProperties actual = new SubscriptionRuntimeProperties( EntityHelper.toModel(entry.getContent().getSubscriptionDescription())); assertEquals(messageCount, actual.getTotalMessageCount()); assertEquals(createdAt, actual.getCreatedAt()); assertEquals(updatedAt, actual.getUpdatedAt()); assertEquals(accessedAt, actual.getAccessedAt()); assertEquals(expectedCount.getActiveMessageCount(), actual.getActiveMessageCount()); assertEquals(expectedCount.getDeadLetterMessageCount(), actual.getDeadLetterMessageCount()); assertEquals(expectedCount.getTransferMessageCount(), actual.getTransferMessageCount()); assertEquals(expectedCount.getTransferDeadLetterMessageCount(), actual.getTransferDeadLetterMessageCount()); } /** * Verify we can deserialize feed XML from a list of subscriptions that has a paged response. */ @Test /** * Verify we can deserialize XML from a GET rule. */ @Test void deserializeSqlRule() throws IOException { final String contents = getContents("SqlRuleFilter.xml"); final RuleDescription expectedRule = new RuleDescription() .setName("foo") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:32:20.9387321Z")) .setAction(new EmptyRuleActionImpl()) .setFilter(new SqlFilterImpl() .setCompatibilityLevel("20") .setSqlExpression("type = \"TestType\"")); final RuleDescriptionEntry expected = new RuleDescriptionEntry() .setId("sb: .setPublished(OffsetDateTime.parse("2020-08-28T04:32:20Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:34:20Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(expectedRule) .setType("application/xml")); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } /** * Verify we can deserialize XML from a GET rule that includes an action. 
*/ @Test void deserializeSqlRuleWithAction() throws IOException { final String contents = getContents("SqlRuleFilterWithAction.xml"); final RuleDescription expectedRule = new RuleDescription() .setName("foo") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:51:24.9967451Z")) .setAction(new SqlRuleActionImpl() .setCompatibilityLevel("20") .setSqlExpression("set FilterTag = 'true'")) .setFilter(new SqlFilterImpl() .setCompatibilityLevel("20") .setSqlExpression("type = \"TestType\"")); final RuleDescriptionEntry expected = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-08-28T04:51:24Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:54:24Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(expectedRule) .setType("application/xml")); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } /** * Verify we can deserialize XML from a GET correlation filter rule that includes an action. */ @Test void deserializeCorrelationFilterRule() throws IOException { final String contents = getContents("CorrelationRuleFilter.xml"); final RuleDescription expectedRule = new RuleDescription() .setName("correlation-test") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:32:50.7697024Z")) .setAction(new EmptyRuleActionImpl()) .setFilter(new CorrelationFilterImpl() .setLabel("matching-label")); final RuleDescriptionEntry expected = new RuleDescriptionEntry() .setId("sb: .setPublished(OffsetDateTime.parse("2020-08-28T04:32:50Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:34:50Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(expectedRule) .setType("application/xml")); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } /** * Verify we can deserialize XML from a GET rule that includes an action. 
*/ @Test void deserializeRulesFeed() throws IOException { final String contents = getContents("RuleDescriptionFeed.xml"); final RuleDescription defaultRule = new RuleDescription() .setName("$Default") .setCreatedAt(OffsetDateTime.parse("2020-08-12T18:48:00.1005312Z")) .setAction(new EmptyRuleActionImpl()) .setFilter(new TrueFilterImpl().setCompatibilityLevel("20").setSqlExpression("1=1")); final RuleDescriptionEntry defaultRuleEntry = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-08-12T18:48:00Z")) .setUpdated(OffsetDateTime.parse("2020-08-12T18:48:00Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(defaultRule) .setType("application/xml")); final RuleDescription correlation = new RuleDescription() .setName("correl") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:32:50.7697024Z")) .setAction(new EmptyRuleActionImpl()) .setFilter(new CorrelationFilterImpl() .setLabel("matching-label")); final RuleDescriptionEntry correlationEntry = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-08-28T04:32:50Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:32:50Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(correlation) .setType("application/xml")); final RuleDescription sqlRule = new RuleDescription() .setName("foo") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:51:24.9967451Z")) .setAction(new SqlRuleActionImpl() .setCompatibilityLevel("20") .setSqlExpression("set FilterTag = 'true'")) .setFilter(new SqlFilterImpl() .setCompatibilityLevel("20") .setSqlExpression("type = \"TestType\"")); final RuleDescriptionEntry sqlRuleEntry = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-08-28T04:32:20Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:32:20Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(sqlRule) .setType("application/xml")); final List<RuleDescriptionEntry> expectedEntries = Arrays.asList(defaultRuleEntry, correlationEntry, sqlRuleEntry); final RuleDescriptionFeed expected = new RuleDescriptionFeed() .setEntry(expectedEntries) .setId("https: .setUpdated(OffsetDateTime.parse("2020-08-28T14:59:16Z")); final RuleDescriptionFeed actual = serializer.deserialize(contents, RuleDescriptionFeed.class); assertNotNull(actual); assertEquals(expected.getId(), actual.getId()); final List<RuleDescriptionEntry> actualEntries = actual.getEntry(); assertNotNull(actualEntries); assertEquals(expectedEntries.size(), actualEntries.size()); for (int i = 0; i < expected.getEntry().size(); i++) { final RuleDescriptionEntry expectedRule = expectedEntries.get(i); final RuleDescriptionEntry actualRule = actualEntries.get(i); assertRuleEntryEquals(expectedRule, actualRule); } } @Test void deserializeRuleEntry() throws IOException { final String contents = getContents("CreateRuleEntry.xml"); final RuleDescription description = new RuleDescription() .setName("connies-bar") .setAction(new SqlRuleActionImpl().setSqlExpression("SET Label = 'my-label'")) .setFilter(new TrueFilterImpl().setSqlExpression("1=1")); final RuleDescriptionEntryContent content = new RuleDescriptionEntryContent() .setRuleDescription(description) .setType("application/xml"); final RuleDescriptionEntry expected = new RuleDescriptionEntry().setContent(content); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } @Test void deserializeRuleEntryResponse() throws IOException { final String 
contents = getContents("CreateRuleEntryResponse.xml"); final RuleDescription description = new RuleDescription() .setName("connies-bar") .setAction(new SqlRuleActionImpl().setSqlExpression("SET Label = 'my-label'").setCompatibilityLevel("20")) .setFilter(new TrueFilterImpl().setSqlExpression("1=1").setCompatibilityLevel("20")) .setCreatedAt(OffsetDateTime.parse("2020-10-05T23:34:21.5963322Z")); final RuleDescriptionEntryContent content = new RuleDescriptionEntryContent() .setRuleDescription(description) .setType("application/xml"); final RuleDescriptionEntry expected = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-10-05T23:31:21Z")) .setUpdated(OffsetDateTime.parse("2020-10-05T23:30:21Z")) .setLink(new ResponseLink() .setRel("self") .setHref("https: .setContent(content); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } /** * Given a file name, gets the corresponding resource and its contents as a string. * * @param fileName Name of file to fetch. * * @return Contents of the file. */ private String getContents(String fileName) { final URL resourceUrl = getClass().getClassLoader().getResource("."); assertNotNull(resourceUrl); final File resourceFolder = new File(resourceUrl.getFile(), "xml"); assertTrue(resourceFolder.exists()); final Path path = Paths.get(resourceFolder.getPath(), fileName); try { return new String(Files.readAllBytes(path), StandardCharsets.UTF_8); } catch (IOException e) { fail(String.format("Unable to read file: ' %s'. Error: %s", path.getFileName(), e)); return null; } } private static void assertQueueEquals(CreateQueueOptions expected, EntityStatus expectedStatus, QueueDescription actual) { assertEquals(expected.getAutoDeleteOnIdle(), actual.getAutoDeleteOnIdle()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isRequiresDuplicateDetection()); assertEquals(expected.isSessionRequired(), actual.isRequiresSession()); assertEquals(expected.getDefaultMessageTimeToLive(), actual.getDefaultMessageTimeToLive()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.getDuplicateDetectionHistoryTimeWindow(), actual.getDuplicateDetectionHistoryTimeWindow()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.isBatchedOperationsEnabled(), actual.isEnableBatchedOperations()); assertEquals(expected.getAutoDeleteOnIdle(), actual.getAutoDeleteOnIdle()); assertEquals(expected.isPartitioningEnabled(), actual.isEnablePartitioning()); assertEquals(expectedStatus, actual.getStatus()); } private static void assertSubscriptionEquals(SubscriptionDescription expected, EntityStatus expectedStatus, SubscriptionDescription actual) { assertEquals(expected.getAutoDeleteOnIdle(), actual.getAutoDeleteOnIdle()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.isDeadLetteringOnFilterEvaluationExceptions(), actual.isDeadLetteringOnFilterEvaluationExceptions()); assertEquals(expected.isRequiresSession(), actual.isRequiresSession()); assertEquals(expected.getDefaultMessageTimeToLive(), actual.getDefaultMessageTimeToLive()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); 
assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.isEnableBatchedOperations(), actual.isEnableBatchedOperations()); assertEquals(expected.getAutoDeleteOnIdle(), actual.getAutoDeleteOnIdle()); assertEquals(expectedStatus, actual.getStatus()); } private static void assertRuleEntryEquals(RuleDescriptionEntry expected, RuleDescriptionEntry actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.getId(), actual.getId()); if (expected.getContent() == null) { assertNull(actual.getContent()); return; } assertNotNull(actual.getContent()); assertEquals(expected.getContent().getType(), actual.getContent().getType()); final RuleDescription expectedRule = expected.getContent().getRuleDescription(); final RuleDescription actualRule = actual.getContent().getRuleDescription(); assertNotNull(actualRule); assertRuleEquals(expectedRule, actualRule); } private static void assertRuleEquals(RuleDescription expected, RuleDescription actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.getName(), actual.getName()); if (expected.getAction() instanceof EmptyRuleActionImpl) { assertTrue(actual.getAction() instanceof EmptyRuleActionImpl); } else if (expected.getAction() instanceof SqlRuleActionImpl) { assertTrue(actual.getAction() instanceof SqlRuleActionImpl); final SqlRuleActionImpl expectedAction = (SqlRuleActionImpl) expected.getAction(); final SqlRuleActionImpl actualAction = (SqlRuleActionImpl) actual.getAction(); assertEquals(expectedAction.getCompatibilityLevel(), actualAction.getCompatibilityLevel()); assertEquals(expectedAction.getSqlExpression(), actualAction.getSqlExpression()); assertEquals(expectedAction.isRequiresPreprocessing(), actualAction.isRequiresPreprocessing()); assertParameters(expectedAction.getParameters(), actualAction.getParameters()); } if (expected.getFilter() instanceof TrueFilterImpl) { assertTrue(actual.getFilter() instanceof TrueFilterImpl); } else if (expected.getFilter() instanceof FalseFilterImpl) { assertTrue(actual.getFilter() instanceof FalseFilterImpl); } if (expected.getFilter() instanceof SqlFilterImpl) { assertTrue(actual.getFilter() instanceof SqlFilterImpl); final SqlFilterImpl expectedFilter = (SqlFilterImpl) expected.getFilter(); final SqlFilterImpl actualFilter = (SqlFilterImpl) actual.getFilter(); assertEquals(expectedFilter.getCompatibilityLevel(), actualFilter.getCompatibilityLevel()); assertEquals(expectedFilter.getSqlExpression(), actualFilter.getSqlExpression()); assertParameters(expectedFilter.getParameters(), actualFilter.getParameters()); } else if (expected.getFilter() instanceof CorrelationFilterImpl) { assertTrue(actual.getFilter() instanceof CorrelationFilterImpl); final CorrelationFilterImpl expectedFilter = (CorrelationFilterImpl) expected.getFilter(); final CorrelationFilterImpl actualFilter = (CorrelationFilterImpl) actual.getFilter(); assertEquals(expectedFilter.getCorrelationId(), actualFilter.getCorrelationId()); assertEquals(expectedFilter.getMessageId(), actualFilter.getMessageId()); assertEquals(expectedFilter.getTo(), actualFilter.getTo()); assertEquals(expectedFilter.getReplyTo(), actualFilter.getReplyTo()); assertEquals(expectedFilter.getReplyToSessionId(), actualFilter.getReplyToSessionId()); assertEquals(expectedFilter.getSessionId(), actualFilter.getSessionId()); assertEquals(expectedFilter.getContentType(), actualFilter.getContentType()); 
assertParameters(expectedFilter.getProperties(), actualFilter.getProperties()); } } private static void assertParameters(List<KeyValueImpl> expected, List<KeyValueImpl> actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.size(), actual.size()); final Map<String, KeyValueImpl> actualMap = actual.stream() .collect(Collectors.toMap(KeyValueImpl::getKey, Function.identity())); for (KeyValueImpl item : expected) { final KeyValueImpl removed = actualMap.remove(item.getKey()); assertNotNull(removed); assertEquals(item.getValue(), removed.getValue()); } assertTrue(actualMap.isEmpty()); } @SuppressWarnings("unchecked") private static void assertTitle(String expectedTitle, Object responseTitle) { assertTrue(responseTitle instanceof LinkedHashMap); final LinkedHashMap<String, String> map = (LinkedHashMap<String, String>) responseTitle; assertTrue(map.containsKey(TITLE_KEY)); assertEquals(expectedTitle, map.get(TITLE_KEY)); } @SuppressWarnings("unchecked") private static void assertResponseTitle(Object expectedResponseTitle, Object actualResponseTitle) { assertTrue(actualResponseTitle instanceof LinkedHashMap); final LinkedHashMap<String, String> actualMap = (LinkedHashMap<String, String>) actualResponseTitle; assertTrue(actualMap.containsKey(TITLE_KEY)); assertTitle(actualMap.get(TITLE_KEY), expectedResponseTitle); } private static LinkedHashMap<String, String> getResponseTitle(String entityName) { final LinkedHashMap<String, String> map = new LinkedHashMap<>(); map.put("", entityName); map.put("type", "text"); return map; } private static class TestAuthorizationRule implements AuthorizationRule { private final List<AccessRights> accessRights; private final String claimType; private final String claimValue; private final String keyName; private final OffsetDateTime createdAt; private final OffsetDateTime modifiedAt; private final String primaryKey; private final String secondaryKey; TestAuthorizationRule(AuthorizationRuleImpl rule) { this.accessRights = rule.getRights(); this.claimType = rule.getClaimType(); this.claimValue = rule.getClaimValue(); this.createdAt = rule.getCreatedTime(); this.keyName = rule.getKeyName(); this.modifiedAt = rule.getModifiedTime(); this.primaryKey = rule.getPrimaryKey(); this.secondaryKey = rule.getSecondaryKey(); } @Override public List<AccessRights> getAccessRights() { return accessRights; } @Override public String getClaimType() { return claimType; } @Override public String getClaimValue() { return claimValue; } @Override public OffsetDateTime getCreatedAt() { return createdAt; } @Override public String getKeyName() { return keyName; } @Override public OffsetDateTime getModifiedAt() { return modifiedAt; } @Override public String getPrimaryKey() { return primaryKey; } @Override public String getSecondaryKey() { return secondaryKey; } } }
class ServiceBusManagementSerializerTest { private static final String TITLE_KEY = ""; private final ServiceBusManagementSerializer serializer = new ServiceBusManagementSerializer(); /** * Verify we can deserialize XML request when creating a queue. */ @Test void deserializeCreateQueueDescription() throws IOException { final String contents = getContents("CreateQueueEntry.xml"); final AuthorizationRule rule = new SharedAccessAuthorizationRule("test-name", "fakePrimaryKey", "fakeSecondaryKey", Collections.singletonList(AccessRights.SEND)); final CreateQueueOptions expected = new CreateQueueOptions() .setAutoDeleteOnIdle(null) .setDefaultMessageTimeToLive(null) .setDuplicateDetectionHistoryTimeWindow(null) .setLockDuration(Duration.ofMinutes(10)) .setMaxSizeInMegabytes(1028) .setDuplicateDetectionRequired(false) .setSessionRequired(true) .setDeadLetteringOnMessageExpiration(false) .setMaxDeliveryCount(5) .setBatchedOperationsEnabled(true) .setPartitioningEnabled(false); expected.getAuthorizationRules().add(rule); final QueueDescriptionEntry entry = serializer.deserialize(contents, QueueDescriptionEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); final QueueDescription actual = entry.getContent().getQueueDescription(); assertQueueEquals(expected, EntityStatus.ACTIVE, actual); final List<AuthorizationRule> actualRules = actual.getAuthorizationRules().stream() .map(TestAuthorizationRule::new) .collect(Collectors.toList()); TestUtils.assertAuthorizationRules(expected.getAuthorizationRules(), actualRules); } /** * Verify we can deserialize XML from a GET queue request. */ @Test void deserializeQueueDescription() throws IOException { final String contents = getContents("QueueDescriptionEntry.xml"); final String queueName = "my-test-queue"; final CreateQueueOptions expected = new CreateQueueOptions() .setLockDuration(Duration.ofMinutes(5)) .setMaxSizeInMegabytes(1024) .setDuplicateDetectionRequired(true) .setSessionRequired(true) .setDefaultMessageTimeToLive(Duration.parse("PT3H20M10S")) .setDeadLetteringOnMessageExpiration(false) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(10)) .setMaxDeliveryCount(10) .setBatchedOperationsEnabled(true) .setAutoDeleteOnIdle(Duration.ofHours(5)) .setPartitioningEnabled(true); final QueueDescriptionEntry entry = serializer.deserialize(contents, QueueDescriptionEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); assertTitle(queueName, entry.getTitle()); final QueueDescription actual = entry.getContent().getQueueDescription(); assertQueueEquals(expected, EntityStatus.DELETING, actual); } /** * Verify we can deserialize XML from a GET queue request and create convenience model, {@link QueueRuntimeProperties}. 
*/ @Test void deserializeQueueRuntimeProperties() throws IOException { final String contents = getContents("QueueDescriptionEntry.xml"); final OffsetDateTime createdAt = OffsetDateTime.parse("2020-06-05T03:55:07.5Z"); final OffsetDateTime updatedAt = OffsetDateTime.parse("2020-06-05T03:45:07.64Z"); final OffsetDateTime accessedAt = OffsetDateTime.parse("0001-01-01T00:00:00Z"); final int sizeInBytes = 2048; final int messageCount = 23; final MessageCountDetails expectedCount = new MessageCountDetails() .setActiveMessageCount(5) .setDeadLetterMessageCount(3) .setScheduledMessageCount(65) .setTransferMessageCount(10) .setTransferDeadLetterMessageCount(123); final QueueDescriptionEntry entry = serializer.deserialize(contents, QueueDescriptionEntry.class); final QueueProperties properties = EntityHelper.toModel(entry.getContent().getQueueDescription()); final QueueRuntimeProperties actual = new QueueRuntimeProperties(properties); assertEquals(sizeInBytes, actual.getSizeInBytes()); assertEquals(messageCount, actual.getTotalMessageCount()); assertEquals(createdAt, actual.getCreatedAt()); assertEquals(updatedAt, actual.getUpdatedAt()); assertEquals(accessedAt, actual.getAccessedAt()); assertEquals(expectedCount.getActiveMessageCount(), actual.getActiveMessageCount()); assertEquals(expectedCount.getDeadLetterMessageCount(), actual.getDeadLetterMessageCount()); assertEquals(expectedCount.getScheduledMessageCount(), actual.getScheduledMessageCount()); assertEquals(expectedCount.getTransferMessageCount(), actual.getTransferMessageCount()); assertEquals(expectedCount.getTransferDeadLetterMessageCount(), actual.getTransferDeadLetterMessageCount()); } /** * Verify we can deserialize feed XML from a list queues operation that has a paged response. */ @Test void deserializeQueueDescriptionFeedPaged() throws IOException { final String contents = getContents("QueueDescriptionFeed-Paged.xml"); final List<ResponseLink> responseLinks = Arrays.asList( new ResponseLink().setRel("self") .setHref("https: new ResponseLink().setRel("next") .setHref("https: ); final String queueName = "q-0"; final CreateQueueOptions options = new CreateQueueOptions() .setLockDuration(Duration.ofMinutes(10)) .setMaxSizeInMegabytes(102) .setDuplicateDetectionRequired(true) .setSessionRequired(true) .setDefaultMessageTimeToLive(Duration.ofSeconds(10)) .setDeadLetteringOnMessageExpiration(false) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(10)) .setMaxDeliveryCount(10) .setBatchedOperationsEnabled(true) .setAutoDeleteOnIdle(Duration.ofSeconds(5)) .setPartitioningEnabled(true); final QueueDescription queueProperties = EntityHelper.getQueueDescription(options); final QueueDescriptionEntry entry1 = new QueueDescriptionEntry() .setBase("https: .setId("https: .setTitle(getResponseTitle("q-0")) .setPublished(OffsetDateTime.parse("2020-03-05T07:17:04Z")) .setUpdated(OffsetDateTime.parse("2020-01-05T07:17:04Z")) .setAuthor(new ResponseAuthor().setName("sb-java")) .setLink(new ResponseLink().setRel("self").setHref("../q-0?api-version=2021-05")) .setContent(new QueueDescriptionEntryContent().setType("application/xml") .setQueueDescription(queueProperties)); final QueueDescriptionEntry entry2 = new QueueDescriptionEntry() .setBase("https: .setId("https: .setTitle(getResponseTitle("q-1")) .setPublished(OffsetDateTime.parse("2020-06-10T07:16:25Z")) .setUpdated(OffsetDateTime.parse("2020-06-15T07:16:25Z")) .setAuthor(new ResponseAuthor().setName("sb-java2")) .setLink(new ResponseLink().setRel("self").setHref("../q-1?api-version=2021-05")) 
.setContent(new QueueDescriptionEntryContent().setType("application/xml") .setQueueDescription(queueProperties)); final QueueDescriptionEntry entry3 = new QueueDescriptionEntry() .setBase("https: .setId("https: .setTitle(getResponseTitle("q-2")) .setPublished(OffsetDateTime.parse("2020-06-05T07:17:06Z")) .setUpdated(OffsetDateTime.parse("2020-06-05T07:17:06Z")) .setAuthor(new ResponseAuthor().setName("sb-java3")) .setLink(new ResponseLink().setRel("self").setHref("../q-2?api-version=2021-05")) .setContent(new QueueDescriptionEntryContent().setType("application/xml") .setQueueDescription(queueProperties)); final Map<String, String> titleMap = new HashMap<>(); titleMap.put("", "Queues"); titleMap.put("type", "text"); final List<QueueDescriptionEntry> entries = Arrays.asList(entry1, entry2, entry3); final QueueDescriptionFeed expected = new QueueDescriptionFeed() .setId("feed-id") .setTitle(titleMap) .setUpdated(OffsetDateTime.parse("2020-12-05T07:17:21Z")) .setLink(responseLinks) .setEntry(entries); final QueueDescriptionFeed actual = serializer.deserialize(contents, QueueDescriptionFeed.class); assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getTitle(), actual.getTitle()); assertEquals(expected.getUpdated(), actual.getUpdated()); assertNotNull(actual.getLink()); assertEquals(expected.getLink().size(), actual.getLink().size()); for (int i = 0; i < expected.getLink().size(); i++) { final ResponseLink expectedLink = expected.getLink().get(i); final ResponseLink actualLink = actual.getLink().get(i); assertEquals(expectedLink.getRel(), actualLink.getRel()); assertEquals(expectedLink.getHref(), actualLink.getHref()); } assertNotNull(actual.getEntry()); assertEquals(expected.getEntry().size(), actual.getEntry().size()); for (int i = 0; i < expected.getEntry().size(); i++) { final QueueDescriptionEntry expectedEntry = expected.getEntry().get(i); final QueueDescriptionEntry actualEntry = actual.getEntry().get(i); assertEquals(expected.getId(), actual.getId()); assertNotNull(actual.getTitle()); assertResponseTitle(expectedEntry.getTitle(), actualEntry.getTitle()); assertEquals(expectedEntry.getUpdated(), actualEntry.getUpdated()); assertEquals(expectedEntry.getPublished(), actualEntry.getPublished()); assertEquals(expectedEntry.getAuthor().getName(), actualEntry.getAuthor().getName()); assertQueueEquals(options, EntityStatus.ACTIVE, actualEntry.getContent().getQueueDescription()); } } /** * Verify we can deserialize XML from a GET namespace request. 
*/ @Test void deserializeNamespace() throws IOException { final String contents = getContents("NamespaceEntry.xml"); final String name = "ShivangiServiceBus"; final String alias = "MyServiceBusFallback"; final OffsetDateTime createdTime = OffsetDateTime.parse("2020-04-09T08:38:55.807Z"); final OffsetDateTime modifiedTime = OffsetDateTime.parse("2020-06-12T06:34:38.383Z"); final MessagingSku sku = MessagingSku.PREMIUM; final NamespaceType namespaceType = NamespaceType.MESSAGING; final NamespacePropertiesEntry entry = serializer.deserialize(contents, NamespacePropertiesEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); assertTitle(name, entry.getTitle()); final NamespaceProperties actual = entry.getContent().getNamespaceProperties(); assertEquals(name, actual.getName()); assertEquals(alias, actual.getAlias()); assertEquals(createdTime, actual.getCreatedTime()); assertEquals(modifiedTime, actual.getModifiedTime()); assertEquals(sku, actual.getMessagingSku()); assertEquals(namespaceType, actual.getNamespaceType()); } /** * Verify we can deserialize XML from a GET subscription request. */ @Test void deserializeSubscription() throws IOException { final String contents = getContents("SubscriptionDescriptionEntry.xml"); final SubscriptionDescription expected = new SubscriptionDescription() .setLockDuration(Duration.ofSeconds(15)) .setRequiresSession(true) .setDefaultMessageTimeToLive(ServiceBusConstants.MAX_DURATION) .setDeadLetteringOnMessageExpiration(false) .setDeadLetteringOnFilterEvaluationExceptions(true) .setEnableBatchedOperations(true) .setMaxDeliveryCount(5) .setAutoDeleteOnIdle(Duration.ofHours(1).plusMinutes(48)); final SubscriptionDescriptionEntry entry = serializer.deserialize(contents, SubscriptionDescriptionEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); final SubscriptionDescription actual = entry.getContent().getSubscriptionDescription(); assertSubscriptionEquals(expected, EntityStatus.ACTIVE, actual); } /** * Verify we can deserialize XML from a PUT subscription request. */ @Test void deserializeCreateSubscription() throws IOException { final String contents = getContents("CreateSubscriptionEntry.xml"); final String topicName = "topic"; final String subscriptionName = "sub46850f"; final SubscriptionDescription expected = EntityHelper.getSubscriptionDescription( new CreateSubscriptionOptions() .setAutoDeleteOnIdle(Duration.parse("P10675199DT2H48M5.477S")) .setDefaultMessageTimeToLive(Duration.parse("P10675199DT2H48M5.477S")) .setSessionRequired(false) .setLockDuration(Duration.ofSeconds(45)) .setMaxDeliveryCount(7)); final SubscriptionDescriptionEntry entry = serializer.deserialize(contents, SubscriptionDescriptionEntry.class); assertNotNull(entry); assertNotNull(entry.getContent()); final SubscriptionDescription actual = entry.getContent().getSubscriptionDescription(); assertSubscriptionEquals(expected, EntityStatus.ACTIVE, actual); } /** * Verify we can deserialize XML from a GET subscription request and create convenience model, {@link * SubscriptionRuntimeProperties}. 
*/ @Test void deserializeSubscriptionRuntimeProperties() throws IOException { final String contents = getContents("SubscriptionDescriptionEntry.xml"); final OffsetDateTime createdAt = OffsetDateTime.parse("2020-06-22T23:47:54.0131447Z"); final OffsetDateTime updatedAt = OffsetDateTime.parse("2020-06-22T23:47:20.0131447Z"); final OffsetDateTime accessedAt = OffsetDateTime.parse("2020-06-22T23:47:54.013Z"); final int messageCount = 13; final MessageCountDetails expectedCount = new MessageCountDetails() .setActiveMessageCount(10) .setDeadLetterMessageCount(50) .setScheduledMessageCount(34) .setTransferMessageCount(11) .setTransferDeadLetterMessageCount(2); final SubscriptionDescriptionEntry entry = serializer.deserialize(contents, SubscriptionDescriptionEntry.class); final SubscriptionRuntimeProperties actual = new SubscriptionRuntimeProperties( EntityHelper.toModel(entry.getContent().getSubscriptionDescription())); assertEquals(messageCount, actual.getTotalMessageCount()); assertEquals(createdAt, actual.getCreatedAt()); assertEquals(updatedAt, actual.getUpdatedAt()); assertEquals(accessedAt, actual.getAccessedAt()); assertEquals(expectedCount.getActiveMessageCount(), actual.getActiveMessageCount()); assertEquals(expectedCount.getDeadLetterMessageCount(), actual.getDeadLetterMessageCount()); assertEquals(expectedCount.getTransferMessageCount(), actual.getTransferMessageCount()); assertEquals(expectedCount.getTransferDeadLetterMessageCount(), actual.getTransferDeadLetterMessageCount()); } /** * Verify we can deserialize feed XML from a list of subscriptions that has a paged response. */ @Test /** * Verify we can deserialize XML from a GET rule. */ @Test void deserializeSqlRule() throws IOException { final String contents = getContents("SqlRuleFilter.xml"); final RuleDescription expectedRule = new RuleDescription() .setName("foo") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:32:20.9387321Z")) .setAction(new EmptyRuleActionImpl()) .setFilter(new SqlFilterImpl() .setCompatibilityLevel("20") .setSqlExpression("type = \"TestType\"")); final RuleDescriptionEntry expected = new RuleDescriptionEntry() .setId("sb: .setPublished(OffsetDateTime.parse("2020-08-28T04:32:20Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:34:20Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(expectedRule) .setType("application/xml")); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } /** * Verify we can deserialize XML from a GET rule that includes an action. 
*/ @Test void deserializeSqlRuleWithAction() throws IOException { final String contents = getContents("SqlRuleFilterWithAction.xml"); final RuleDescription expectedRule = new RuleDescription() .setName("foo") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:51:24.9967451Z")) .setAction(new SqlRuleActionImpl() .setCompatibilityLevel("20") .setSqlExpression("set FilterTag = 'true'")) .setFilter(new SqlFilterImpl() .setCompatibilityLevel("20") .setSqlExpression("type = \"TestType\"")); final RuleDescriptionEntry expected = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-08-28T04:51:24Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:54:24Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(expectedRule) .setType("application/xml")); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } /** * Verify we can deserialize XML from a GET correlation filter rule that includes an action. */ @Test void deserializeCorrelationFilterRule() throws IOException { final String contents = getContents("CorrelationRuleFilter.xml"); final RuleDescription expectedRule = new RuleDescription() .setName("correlation-test") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:32:50.7697024Z")) .setAction(new EmptyRuleActionImpl()) .setFilter(new CorrelationFilterImpl() .setLabel("matching-label")); final RuleDescriptionEntry expected = new RuleDescriptionEntry() .setId("sb: .setPublished(OffsetDateTime.parse("2020-08-28T04:32:50Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:34:50Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(expectedRule) .setType("application/xml")); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } /** * Verify we can deserialize XML from a GET rule that includes an action. 
*/ @Test void deserializeRulesFeed() throws IOException { final String contents = getContents("RuleDescriptionFeed.xml"); final RuleDescription defaultRule = new RuleDescription() .setName("$Default") .setCreatedAt(OffsetDateTime.parse("2020-08-12T18:48:00.1005312Z")) .setAction(new EmptyRuleActionImpl()) .setFilter(new TrueFilterImpl().setCompatibilityLevel("20").setSqlExpression("1=1")); final RuleDescriptionEntry defaultRuleEntry = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-08-12T18:48:00Z")) .setUpdated(OffsetDateTime.parse("2020-08-12T18:48:00Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(defaultRule) .setType("application/xml")); final RuleDescription correlation = new RuleDescription() .setName("correl") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:32:50.7697024Z")) .setAction(new EmptyRuleActionImpl()) .setFilter(new CorrelationFilterImpl() .setLabel("matching-label")); final RuleDescriptionEntry correlationEntry = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-08-28T04:32:50Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:32:50Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(correlation) .setType("application/xml")); final RuleDescription sqlRule = new RuleDescription() .setName("foo") .setCreatedAt(OffsetDateTime.parse("2020-08-28T04:51:24.9967451Z")) .setAction(new SqlRuleActionImpl() .setCompatibilityLevel("20") .setSqlExpression("set FilterTag = 'true'")) .setFilter(new SqlFilterImpl() .setCompatibilityLevel("20") .setSqlExpression("type = \"TestType\"")); final RuleDescriptionEntry sqlRuleEntry = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-08-28T04:32:20Z")) .setUpdated(OffsetDateTime.parse("2020-08-28T04:32:20Z")) .setContent(new RuleDescriptionEntryContent() .setRuleDescription(sqlRule) .setType("application/xml")); final List<RuleDescriptionEntry> expectedEntries = Arrays.asList(defaultRuleEntry, correlationEntry, sqlRuleEntry); final RuleDescriptionFeed expected = new RuleDescriptionFeed() .setEntry(expectedEntries) .setId("https: .setUpdated(OffsetDateTime.parse("2020-08-28T14:59:16Z")); final RuleDescriptionFeed actual = serializer.deserialize(contents, RuleDescriptionFeed.class); assertNotNull(actual); assertEquals(expected.getId(), actual.getId()); final List<RuleDescriptionEntry> actualEntries = actual.getEntry(); assertNotNull(actualEntries); assertEquals(expectedEntries.size(), actualEntries.size()); for (int i = 0; i < expected.getEntry().size(); i++) { final RuleDescriptionEntry expectedRule = expectedEntries.get(i); final RuleDescriptionEntry actualRule = actualEntries.get(i); assertRuleEntryEquals(expectedRule, actualRule); } } @Test void deserializeRuleEntry() throws IOException { final String contents = getContents("CreateRuleEntry.xml"); final RuleDescription description = new RuleDescription() .setName("connies-bar") .setAction(new SqlRuleActionImpl().setSqlExpression("SET Label = 'my-label'")) .setFilter(new TrueFilterImpl().setSqlExpression("1=1")); final RuleDescriptionEntryContent content = new RuleDescriptionEntryContent() .setRuleDescription(description) .setType("application/xml"); final RuleDescriptionEntry expected = new RuleDescriptionEntry().setContent(content); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } @Test void deserializeRuleEntryResponse() throws IOException { final String 
contents = getContents("CreateRuleEntryResponse.xml"); final RuleDescription description = new RuleDescription() .setName("connies-bar") .setAction(new SqlRuleActionImpl().setSqlExpression("SET Label = 'my-label'").setCompatibilityLevel("20")) .setFilter(new TrueFilterImpl().setSqlExpression("1=1").setCompatibilityLevel("20")) .setCreatedAt(OffsetDateTime.parse("2020-10-05T23:34:21.5963322Z")); final RuleDescriptionEntryContent content = new RuleDescriptionEntryContent() .setRuleDescription(description) .setType("application/xml"); final RuleDescriptionEntry expected = new RuleDescriptionEntry() .setId("https: .setPublished(OffsetDateTime.parse("2020-10-05T23:31:21Z")) .setUpdated(OffsetDateTime.parse("2020-10-05T23:30:21Z")) .setLink(new ResponseLink() .setRel("self") .setHref("https: .setContent(content); final RuleDescriptionEntry actual = serializer.deserialize(contents, RuleDescriptionEntry.class); assertRuleEntryEquals(expected, actual); } /** * Given a file name, gets the corresponding resource and its contents as a string. * * @param fileName Name of file to fetch. * * @return Contents of the file. */ private String getContents(String fileName) { final URL resourceUrl = getClass().getClassLoader().getResource("."); assertNotNull(resourceUrl); final File resourceFolder = new File(resourceUrl.getFile(), "xml"); assertTrue(resourceFolder.exists()); final Path path = Paths.get(resourceFolder.getPath(), fileName); try { return new String(Files.readAllBytes(path), StandardCharsets.UTF_8); } catch (IOException e) { fail(String.format("Unable to read file: ' %s'. Error: %s", path.getFileName(), e)); return null; } } private static void assertQueueEquals(CreateQueueOptions expected, EntityStatus expectedStatus, QueueDescription actual) { assertEquals(expected.getAutoDeleteOnIdle(), actual.getAutoDeleteOnIdle()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isRequiresDuplicateDetection()); assertEquals(expected.isSessionRequired(), actual.isRequiresSession()); assertEquals(expected.getDefaultMessageTimeToLive(), actual.getDefaultMessageTimeToLive()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.getDuplicateDetectionHistoryTimeWindow(), actual.getDuplicateDetectionHistoryTimeWindow()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.isBatchedOperationsEnabled(), actual.isEnableBatchedOperations()); assertEquals(expected.getAutoDeleteOnIdle(), actual.getAutoDeleteOnIdle()); assertEquals(expected.isPartitioningEnabled(), actual.isEnablePartitioning()); assertEquals(expectedStatus, actual.getStatus()); } private static void assertSubscriptionEquals(SubscriptionDescription expected, EntityStatus expectedStatus, SubscriptionDescription actual) { assertEquals(expected.getAutoDeleteOnIdle(), actual.getAutoDeleteOnIdle()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.isDeadLetteringOnFilterEvaluationExceptions(), actual.isDeadLetteringOnFilterEvaluationExceptions()); assertEquals(expected.isRequiresSession(), actual.isRequiresSession()); assertEquals(expected.getDefaultMessageTimeToLive(), actual.getDefaultMessageTimeToLive()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); 
assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.isEnableBatchedOperations(), actual.isEnableBatchedOperations()); assertEquals(expected.getAutoDeleteOnIdle(), actual.getAutoDeleteOnIdle()); assertEquals(expectedStatus, actual.getStatus()); } private static void assertRuleEntryEquals(RuleDescriptionEntry expected, RuleDescriptionEntry actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.getId(), actual.getId()); if (expected.getContent() == null) { assertNull(actual.getContent()); return; } assertNotNull(actual.getContent()); assertEquals(expected.getContent().getType(), actual.getContent().getType()); final RuleDescription expectedRule = expected.getContent().getRuleDescription(); final RuleDescription actualRule = actual.getContent().getRuleDescription(); assertNotNull(actualRule); assertRuleEquals(expectedRule, actualRule); } private static void assertRuleEquals(RuleDescription expected, RuleDescription actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.getName(), actual.getName()); if (expected.getAction() instanceof EmptyRuleActionImpl) { assertTrue(actual.getAction() instanceof EmptyRuleActionImpl); } else if (expected.getAction() instanceof SqlRuleActionImpl) { assertTrue(actual.getAction() instanceof SqlRuleActionImpl); final SqlRuleActionImpl expectedAction = (SqlRuleActionImpl) expected.getAction(); final SqlRuleActionImpl actualAction = (SqlRuleActionImpl) actual.getAction(); assertEquals(expectedAction.getCompatibilityLevel(), actualAction.getCompatibilityLevel()); assertEquals(expectedAction.getSqlExpression(), actualAction.getSqlExpression()); assertEquals(expectedAction.isRequiresPreprocessing(), actualAction.isRequiresPreprocessing()); assertParameters(expectedAction.getParameters(), actualAction.getParameters()); } if (expected.getFilter() instanceof TrueFilterImpl) { assertTrue(actual.getFilter() instanceof TrueFilterImpl); } else if (expected.getFilter() instanceof FalseFilterImpl) { assertTrue(actual.getFilter() instanceof FalseFilterImpl); } if (expected.getFilter() instanceof SqlFilterImpl) { assertTrue(actual.getFilter() instanceof SqlFilterImpl); final SqlFilterImpl expectedFilter = (SqlFilterImpl) expected.getFilter(); final SqlFilterImpl actualFilter = (SqlFilterImpl) actual.getFilter(); assertEquals(expectedFilter.getCompatibilityLevel(), actualFilter.getCompatibilityLevel()); assertEquals(expectedFilter.getSqlExpression(), actualFilter.getSqlExpression()); assertParameters(expectedFilter.getParameters(), actualFilter.getParameters()); } else if (expected.getFilter() instanceof CorrelationFilterImpl) { assertTrue(actual.getFilter() instanceof CorrelationFilterImpl); final CorrelationFilterImpl expectedFilter = (CorrelationFilterImpl) expected.getFilter(); final CorrelationFilterImpl actualFilter = (CorrelationFilterImpl) actual.getFilter(); assertEquals(expectedFilter.getCorrelationId(), actualFilter.getCorrelationId()); assertEquals(expectedFilter.getMessageId(), actualFilter.getMessageId()); assertEquals(expectedFilter.getTo(), actualFilter.getTo()); assertEquals(expectedFilter.getReplyTo(), actualFilter.getReplyTo()); assertEquals(expectedFilter.getReplyToSessionId(), actualFilter.getReplyToSessionId()); assertEquals(expectedFilter.getSessionId(), actualFilter.getSessionId()); assertEquals(expectedFilter.getContentType(), actualFilter.getContentType()); 
assertParameters(expectedFilter.getProperties(), actualFilter.getProperties()); } } private static void assertParameters(List<KeyValueImpl> expected, List<KeyValueImpl> actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.size(), actual.size()); final Map<String, KeyValueImpl> actualMap = actual.stream() .collect(Collectors.toMap(KeyValueImpl::getKey, Function.identity())); for (KeyValueImpl item : expected) { final KeyValueImpl removed = actualMap.remove(item.getKey()); assertNotNull(removed); assertEquals(item.getValue(), removed.getValue()); } assertTrue(actualMap.isEmpty()); } @SuppressWarnings("unchecked") private static void assertTitle(String expectedTitle, Object responseTitle) { assertTrue(responseTitle instanceof LinkedHashMap); final LinkedHashMap<String, String> map = (LinkedHashMap<String, String>) responseTitle; assertTrue(map.containsKey(TITLE_KEY)); assertEquals(expectedTitle, map.get(TITLE_KEY)); } @SuppressWarnings("unchecked") private static void assertResponseTitle(Object expectedResponseTitle, Object actualResponseTitle) { assertTrue(actualResponseTitle instanceof LinkedHashMap); final LinkedHashMap<String, String> actualMap = (LinkedHashMap<String, String>) actualResponseTitle; assertTrue(actualMap.containsKey(TITLE_KEY)); assertTitle(actualMap.get(TITLE_KEY), expectedResponseTitle); } private static LinkedHashMap<String, String> getResponseTitle(String entityName) { final LinkedHashMap<String, String> map = new LinkedHashMap<>(); map.put("", entityName); map.put("type", "text"); return map; } private static class TestAuthorizationRule implements AuthorizationRule { private final List<AccessRights> accessRights; private final String claimType; private final String claimValue; private final String keyName; private final OffsetDateTime createdAt; private final OffsetDateTime modifiedAt; private final String primaryKey; private final String secondaryKey; TestAuthorizationRule(AuthorizationRuleImpl rule) { this.accessRights = rule.getRights(); this.claimType = rule.getClaimType(); this.claimValue = rule.getClaimValue(); this.createdAt = rule.getCreatedTime(); this.keyName = rule.getKeyName(); this.modifiedAt = rule.getModifiedTime(); this.primaryKey = rule.getPrimaryKey(); this.secondaryKey = rule.getSecondaryKey(); } @Override public List<AccessRights> getAccessRights() { return accessRights; } @Override public String getClaimType() { return claimType; } @Override public String getClaimValue() { return claimValue; } @Override public OffsetDateTime getCreatedAt() { return createdAt; } @Override public String getKeyName() { return keyName; } @Override public OffsetDateTime getModifiedAt() { return modifiedAt; } @Override public String getPrimaryKey() { return primaryKey; } @Override public String getSecondaryKey() { return secondaryKey; } } }
We should try to normalize the preferredRegions before logging them in the ClientTelemetry, otherwise we may end up with many different values. Lowercase them and trim spaces; this is the same logic that the internals of the SDK use for normalizing the preferred regions, e.g. "East US 2" -> "eastus2"
public void serialize(ClientTelemetryInfo telemetry, JsonGenerator generator, SerializerProvider serializerProvider) throws IOException { generator.writeStartObject(); generator.writeStringField("timeStamp", telemetry.getTimeStamp()); generator.writeStringField("clientId", telemetry.getClientId()); if (telemetry.getProcessId() != null) { generator.writeStringField("processId", telemetry.getProcessId()); } if (telemetry.getUserAgent() != null) { generator.writeStringField("userAgent", telemetry.getUserAgent()); } generator.writeStringField("connectionMode", telemetry.getConnectionMode().toString()); generator.writeStringField("globalDatabaseAccountName", telemetry.getGlobalDatabaseAccountName()); if (telemetry.getApplicationRegion() != null) { generator.writeStringField("applicationRegion", telemetry.getApplicationRegion()); } if (telemetry.getHostEnvInfo() != null) { generator.writeStringField("hostEnvInfo", telemetry.getHostEnvInfo()); } if (telemetry.getAcceleratedNetworking() != null) { generator.writeStringField("acceleratedNetworking", telemetry.getAcceleratedNetworking().toString()); } if (telemetry.getPreferredRegions() != null && telemetry.getPreferredRegions().size() > 0) { generator.writeStringField("preferredRegions", telemetry.getPreferredRegions().toString()); } generator.writeNumberField("aggregationIntervalInSec", telemetry.getAggregationIntervalInSec()); generator.writeObjectField("systemInfo", telemetry.getSystemInfoMap().keySet()); generator.writeObjectField("cacheRefreshInfo", telemetry.getCacheRefreshInfoMap().keySet()); generator.writeObjectField("operationInfo", telemetry.getOperationInfoMap().keySet()); generator.writeEndObject(); }
telemetry.getPreferredRegions().toString());
public void serialize(ClientTelemetryInfo telemetry, JsonGenerator generator, SerializerProvider serializerProvider) throws IOException { generator.writeStartObject(); generator.writeStringField("timeStamp", telemetry.getTimeStamp()); generator.writeStringField("clientId", telemetry.getClientId()); if (telemetry.getProcessId() != null) { generator.writeStringField("processId", telemetry.getProcessId()); } if (telemetry.getUserAgent() != null) { generator.writeStringField("userAgent", telemetry.getUserAgent()); } generator.writeStringField("connectionMode", telemetry.getConnectionMode().toString()); generator.writeStringField("globalDatabaseAccountName", telemetry.getGlobalDatabaseAccountName()); if (telemetry.getApplicationRegion() != null) { generator.writeStringField("applicationRegion", telemetry.getApplicationRegion()); } if (telemetry.getHostEnvInfo() != null) { generator.writeStringField("hostEnvInfo", telemetry.getHostEnvInfo()); } if (telemetry.getAcceleratedNetworking() != null) { generator.writeStringField("acceleratedNetworking", telemetry.getAcceleratedNetworking().toString()); } if (telemetry.getPreferredRegions() != null && telemetry.getPreferredRegions().size() > 0) { generator.writeObjectField("preferredRegions", telemetry.getPreferredRegions()); } generator.writeNumberField("aggregationIntervalInSec", telemetry.getAggregationIntervalInSec()); generator.writeObjectField("systemInfo", telemetry.getSystemInfoMap().keySet()); generator.writeObjectField("cacheRefreshInfo", telemetry.getCacheRefreshInfoMap().keySet()); generator.writeObjectField("operationInfo", telemetry.getOperationInfoMap().keySet()); generator.writeEndObject(); }
class ClientTelemetrySerializer extends StdSerializer<ClientTelemetryInfo> { private static final long serialVersionUID = -2746532297176812860L; ClientTelemetrySerializer() { super(ClientTelemetryInfo.class); } @Override }
class ClientTelemetrySerializer extends StdSerializer<ClientTelemetryInfo> { private static final long serialVersionUID = -2746532297176812860L; ClientTelemetrySerializer() { super(ClientTelemetryInfo.class); } @Override }
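As a minimal sketch of the normalization suggested in the comment above (the helper name and its placement are assumptions; the SDK's internal utility is not shown here), each preferred region can be lowercased and stripped of spaces before it is written into the telemetry payload, so that "East US 2" and "eastus2" aggregate to the same value:

import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.stream.Collectors;

final class PreferredRegionNormalizer {
    // Hypothetical helper mirroring the lowercase-and-strip-spaces rule described above,
    // e.g. "East US 2" -> "eastus2". The real SDK-internal normalizer may differ.
    static String normalize(String region) {
        return region == null ? null : region.toLowerCase(Locale.ROOT).replace(" ", "");
    }

    // Normalize the whole preferred-regions list before handing it to the serializer.
    static List<String> normalizeAll(List<String> preferredRegions) {
        return preferredRegions.stream()
            .map(PreferredRegionNormalizer::normalize)
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // Prints [eastus2, westeurope]
        System.out.println(normalizeAll(Arrays.asList("East US 2", "West Europe")));
    }
}

In ClientTelemetrySerializer.serialize this would be applied to telemetry.getPreferredRegions() just before the preferredRegions field is written.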
Temporarily disabled due to bug https://github.com/Azure/azure-sdk-for-java/issues/25168
public void testSendToUserString() { BinaryData message = BinaryData.fromString("Hello World!"); assertResponse(client.sendToUserWithResponse("test_user", message, new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); assertResponse(client.sendToUserWithResponse("test_user", message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), null, Context.NONE), 202); }
public void testSendToUserString() { BinaryData message = BinaryData.fromString("Hello World!"); assertResponse(client.sendToUserWithResponse("test_user", message, new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); assertResponse(client.sendToUserWithResponse("test_user", message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), null, Context.NONE), 202); }
class WebPubSubServiceClientTests extends TestBase { private static final String DEFAULT_CONNECTION_STRING = "Endpoint=https: private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("WEB_PUB_SUB_CS", DEFAULT_CONNECTION_STRING); private static final String ENDPOINT = Configuration.getGlobalConfiguration() .get("WEB_PUB_SUB_ENDPOINT", "https: private WebPubSubServiceClient client; private WebPubSubServiceAsyncClient asyncClient; @BeforeEach public void setup() { WebPubSubServiceClientBuilder webPubSubServiceClientBuilder = new WebPubSubServiceClientBuilder() .connectionString(CONNECTION_STRING) .httpClient(HttpClient.createDefault()) .hub("test"); if (getTestMode() == TestMode.PLAYBACK) { webPubSubServiceClientBuilder.httpClient(interceptorManager.getPlaybackClient()); } else if (getTestMode() == TestMode.RECORD) { webPubSubServiceClientBuilder.addPolicy(interceptorManager.getRecordPolicy()); } this.client = webPubSubServiceClientBuilder .buildClient(); this.asyncClient = webPubSubServiceClientBuilder .buildAsyncClient(); } private void assertResponse(Response<?> response, int expectedCode) { assertNotNull(response); assertEquals(expectedCode, response.getStatusCode()); } /***************************************************************************************************************** * Sync Tests - WebPubSubServiceClient ****************************************************************************************************************/ @Test public void assertClientNotNull() { assertNotNull(client); } @Test public void testBroadcastString() { assertResponse(client.sendToAllWithResponse( BinaryData.fromString("Hello World - Broadcast test!"), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); } @Test public void testBroadcastBytes() { byte[] bytes = "Hello World - Broadcast test!".getBytes(); assertResponse(client.sendToAllWithResponse( BinaryData.fromBytes(bytes), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "application/octet-stream")), Context.NONE), 202); } @Test @Test public void testSendToUserBytes() { assertResponse(client.sendToUserWithResponse("test_user", BinaryData.fromBytes("Hello World!".getBytes(StandardCharsets.UTF_8)), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "application/octet-stream")), Context.NONE), 202); } @Test public void testSendToConnectionString() { assertResponse(client.sendToConnectionWithResponse("test_connection", BinaryData.fromString("Hello World!"), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); } @Test public void testSendToConnectionBytes() { assertResponse(client.sendToConnectionWithResponse("test_connection", BinaryData.fromBytes("Hello World!".getBytes(StandardCharsets.UTF_8)), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "application/octet-stream")), Context.NONE), 202); } @Test public void testSendToConnectionJson() { assertResponse(client.sendToConnectionWithResponse("test_connection", BinaryData.fromString("{\"data\": true}"), new RequestOptions() .addRequestCallback(request -> request.getHeaders().set("Content-Type", "application/json")), Context.NONE), 202); } @Test public void testSendToAllJson() { RequestOptions requestOptions = new RequestOptions().addRequestCallback(request -> request.getHeaders().set( 
"Content-Type", "application/json")); assertResponse(client.sendToAllWithResponse(BinaryData.fromString("{\"boolvalue\": true}"), requestOptions, Context.NONE), 202); assertResponse(client.sendToAllWithResponse(BinaryData.fromString("{\"stringvalue\": \"testingwebpubsub\"}"), requestOptions, Context.NONE), 202); assertResponse(client.sendToAllWithResponse(BinaryData.fromString("{\"intvalue\": 25}"), requestOptions, Context.NONE), 202); assertResponse(client.sendToAllWithResponse(BinaryData.fromString("{\"floatvalue\": 55.4}"), requestOptions, Context.NONE), 202); } @Test public void testRemoveNonExistentUserFromHub() { Response<Void> removeUserResponse = client.removeUserFromAllGroupsWithResponse("testRemoveNonExistentUserFromHub", new RequestOptions(), Context.NONE); assertEquals(200, removeUserResponse.getStatusCode()); } @Test @DoNotRecord(skipInPlayback = true) public void testGetAuthenticationToken() throws ParseException { WebPubSubClientAccessToken token = client.getClientAccessToken(new GetClientAccessTokenOptions()); Assertions.assertNotNull(token); Assertions.assertNotNull(token.getToken()); Assertions.assertNotNull(token.getUrl()); Assertions.assertTrue(token.getUrl().startsWith("wss: Assertions.assertTrue(token.getUrl().contains(".webpubsub.azure.com/client/hubs/")); String authToken = token.getToken(); JWT jwt = JWTParser.parse(authToken); JWTClaimsSet claimsSet = jwt.getJWTClaimsSet(); Assertions.assertNotNull(claimsSet); Assertions.assertNotNull(claimsSet.getAudience()); Assertions.assertFalse(claimsSet.getAudience().isEmpty()); String aud = claimsSet.getAudience().iterator().next(); Assertions.assertTrue(aud.contains(".webpubsub.azure.com/client/hubs/")); } /***************************************************************************************************************** * Sync Tests - WebPubSubGroup ****************************************************************************************************************/ @Test public void testRemoveNonExistentUserFromGroup() { assertResponse(client.removeUserFromGroupWithResponse("java", "testRemoveNonExistentUserFromGroup", new RequestOptions(), Context.NONE), 200); } @Test public void testSendMessageToGroup() { assertResponse(client.sendToGroupWithResponse("java", BinaryData.fromString("Hello World!"), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); } @Test public void testAadCredential() { WebPubSubServiceClientBuilder webPubSubServiceClientBuilder = new WebPubSubServiceClientBuilder() .endpoint(ENDPOINT) .httpClient(HttpClient.createDefault()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .hub("test"); if (getTestMode() == TestMode.PLAYBACK) { webPubSubServiceClientBuilder.httpClient(interceptorManager.getPlaybackClient()) .connectionString(CONNECTION_STRING); } else if (getTestMode() == TestMode.RECORD) { webPubSubServiceClientBuilder.addPolicy(interceptorManager.getRecordPolicy()) .credential(new DefaultAzureCredentialBuilder().build()); } else if (getTestMode() == TestMode.LIVE) { webPubSubServiceClientBuilder.credential(new DefaultAzureCredentialBuilder().build()); } this.client = webPubSubServiceClientBuilder.buildClient(); assertResponse(client.sendToUserWithResponse("test_user", BinaryData.fromString("Hello World!"), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); } @Test public void testCheckPermission() { RequestOptions 
requestOptions = new RequestOptions() .addQueryParam("targetName", "group_name") .setThrowOnError(false); boolean permission = client.checkPermissionWithResponse(WebPubSubPermission.JOIN_LEAVE_GROUP, "connection_id", requestOptions, Context.NONE).getValue(); Assertions.assertFalse(permission); } }
class WebPubSubServiceClientTests extends TestBase { private static final String DEFAULT_CONNECTION_STRING = "Endpoint=https: private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("WEB_PUB_SUB_CS", DEFAULT_CONNECTION_STRING); private static final String ENDPOINT = Configuration.getGlobalConfiguration() .get("WEB_PUB_SUB_ENDPOINT", "https: private WebPubSubServiceClient client; private WebPubSubServiceAsyncClient asyncClient; @BeforeEach public void setup() { WebPubSubServiceClientBuilder webPubSubServiceClientBuilder = new WebPubSubServiceClientBuilder() .connectionString(CONNECTION_STRING) .httpClient(HttpClient.createDefault()) .hub("test"); if (getTestMode() == TestMode.PLAYBACK) { webPubSubServiceClientBuilder.httpClient(interceptorManager.getPlaybackClient()); } else if (getTestMode() == TestMode.RECORD) { webPubSubServiceClientBuilder.addPolicy(interceptorManager.getRecordPolicy()); } this.client = webPubSubServiceClientBuilder .buildClient(); this.asyncClient = webPubSubServiceClientBuilder .buildAsyncClient(); } private void assertResponse(Response<?> response, int expectedCode) { assertNotNull(response); assertEquals(expectedCode, response.getStatusCode()); } /***************************************************************************************************************** * Sync Tests - WebPubSubServiceClient ****************************************************************************************************************/ @Test public void assertClientNotNull() { assertNotNull(client); } @Test public void testBroadcastString() { assertResponse(client.sendToAllWithResponse( BinaryData.fromString("Hello World - Broadcast test!"), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); } @Test public void testBroadcastBytes() { byte[] bytes = "Hello World - Broadcast test!".getBytes(); assertResponse(client.sendToAllWithResponse( BinaryData.fromBytes(bytes), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "application/octet-stream")), Context.NONE), 202); } @Test @Test public void testSendToUserBytes() { assertResponse(client.sendToUserWithResponse("test_user", BinaryData.fromBytes("Hello World!".getBytes(StandardCharsets.UTF_8)), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "application/octet-stream")), Context.NONE), 202); } @Test public void testSendToConnectionString() { assertResponse(client.sendToConnectionWithResponse("test_connection", BinaryData.fromString("Hello World!"), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); } @Test public void testSendToConnectionBytes() { assertResponse(client.sendToConnectionWithResponse("test_connection", BinaryData.fromBytes("Hello World!".getBytes(StandardCharsets.UTF_8)), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "application/octet-stream")), Context.NONE), 202); } @Test public void testSendToConnectionJson() { assertResponse(client.sendToConnectionWithResponse("test_connection", BinaryData.fromString("{\"data\": true}"), new RequestOptions() .addRequestCallback(request -> request.getHeaders().set("Content-Type", "application/json")), Context.NONE), 202); } @Test public void testSendToAllJson() { RequestOptions requestOptions = new RequestOptions().addRequestCallback(request -> request.getHeaders().set( 
"Content-Type", "application/json")); assertResponse(client.sendToAllWithResponse(BinaryData.fromString("{\"boolvalue\": true}"), requestOptions, Context.NONE), 202); assertResponse(client.sendToAllWithResponse(BinaryData.fromString("{\"stringvalue\": \"testingwebpubsub\"}"), requestOptions, Context.NONE), 202); assertResponse(client.sendToAllWithResponse(BinaryData.fromString("{\"intvalue\": 25}"), requestOptions, Context.NONE), 202); assertResponse(client.sendToAllWithResponse(BinaryData.fromString("{\"floatvalue\": 55.4}"), requestOptions, Context.NONE), 202); } @Test public void testRemoveNonExistentUserFromHub() { Response<Void> removeUserResponse = client.removeUserFromAllGroupsWithResponse("testRemoveNonExistentUserFromHub", new RequestOptions(), Context.NONE); assertEquals(200, removeUserResponse.getStatusCode()); } @Test @DoNotRecord(skipInPlayback = true) public void testGetAuthenticationToken() throws ParseException { WebPubSubClientAccessToken token = client.getClientAccessToken(new GetClientAccessTokenOptions()); Assertions.assertNotNull(token); Assertions.assertNotNull(token.getToken()); Assertions.assertNotNull(token.getUrl()); Assertions.assertTrue(token.getUrl().startsWith("wss: Assertions.assertTrue(token.getUrl().contains(".webpubsub.azure.com/client/hubs/")); String authToken = token.getToken(); JWT jwt = JWTParser.parse(authToken); JWTClaimsSet claimsSet = jwt.getJWTClaimsSet(); Assertions.assertNotNull(claimsSet); Assertions.assertNotNull(claimsSet.getAudience()); Assertions.assertFalse(claimsSet.getAudience().isEmpty()); String aud = claimsSet.getAudience().iterator().next(); Assertions.assertTrue(aud.contains(".webpubsub.azure.com/client/hubs/")); } /***************************************************************************************************************** * Sync Tests - WebPubSubGroup ****************************************************************************************************************/ @Test public void testRemoveNonExistentUserFromGroup() { assertResponse(client.removeUserFromGroupWithResponse("java", "testRemoveNonExistentUserFromGroup", new RequestOptions(), Context.NONE), 200); } @Test public void testSendMessageToGroup() { assertResponse(client.sendToGroupWithResponse("java", BinaryData.fromString("Hello World!"), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); } @Test public void testAadCredential() { WebPubSubServiceClientBuilder webPubSubServiceClientBuilder = new WebPubSubServiceClientBuilder() .endpoint(ENDPOINT) .httpClient(HttpClient.createDefault()) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .hub("test"); if (getTestMode() == TestMode.PLAYBACK) { webPubSubServiceClientBuilder.httpClient(interceptorManager.getPlaybackClient()) .connectionString(CONNECTION_STRING); } else if (getTestMode() == TestMode.RECORD) { webPubSubServiceClientBuilder.addPolicy(interceptorManager.getRecordPolicy()) .credential(new DefaultAzureCredentialBuilder().build()); } else if (getTestMode() == TestMode.LIVE) { webPubSubServiceClientBuilder.credential(new DefaultAzureCredentialBuilder().build()); } this.client = webPubSubServiceClientBuilder.buildClient(); assertResponse(client.sendToUserWithResponse("test_user", BinaryData.fromString("Hello World!"), new RequestOptions().addRequestCallback(request -> request.getHeaders() .set("Content-Type", "text/plain")), Context.NONE), 202); } @Test public void testCheckPermission() { RequestOptions 
requestOptions = new RequestOptions() .addQueryParam("targetName", "group_name") .setThrowOnError(false); boolean permission = client.checkPermissionWithResponse(WebPubSubPermission.JOIN_LEAVE_GROUP, "connection_id", requestOptions, Context.NONE).getValue(); Assertions.assertFalse(permission); } }
Couldn't we avoid creating the temporary stack if we moved the merge operation into the `Context` class itself?
public static Context mergeContexts(Context into, Context from) { Objects.requireNonNull(into, "'into' cannot be null."); Objects.requireNonNull(from, "'from' cannot be null."); Stack<Context> fromContextStack = from.getValueStack(); Context returnContext = into; while (!fromContextStack.empty()) { Context toAdd = fromContextStack.pop(); returnContext = returnContext.addData(toAdd.getKey(), toAdd.getValue()); } return returnContext; }
}
public static Context mergeContexts(Context into, Context from) { Objects.requireNonNull(into, "'into' cannot be null."); Objects.requireNonNull(from, "'from' cannot be null."); Context[] contextChain = from.getContextChain(); Context returnContext = into; for (Context toAdd : contextChain) { if (toAdd != null) { returnContext = returnContext.addData(toAdd.getKey(), toAdd.getValue()); } } return returnContext; }
class from an array of Objects. * * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] args, Class<T> clazz) { if (isNullOrEmpty(args)) { return null; } for (Object arg : args) { if (clazz.isInstance(arg)) { return clazz.cast(arg); } } return null; }
class from an array of Objects. * * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] args, Class<T> clazz) { if (isNullOrEmpty(args)) { return null; } for (Object arg : args) { if (clazz.isInstance(arg)) { return clazz.cast(arg); } } return null; }
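To illustrate the idea raised in the comment above, moving the merge down so that no temporary Stack is ever built, here is a simplified, self-contained model of an immutable linked context. SimpleContext is a hypothetical stand-in, not azure-core's Context; the sketch only shows how a parent chain can be materialized oldest-first into an array in a single pass and then merged from that array:

final class SimpleContext {
    private final SimpleContext parent;
    private final Object key;
    private final Object value;
    private final int chainLength; // depth of this node, so the array can be sized up front

    SimpleContext(Object key, Object value) {
        this(null, key, value);
    }

    private SimpleContext(SimpleContext parent, Object key, Object value) {
        this.parent = parent;
        this.key = key;
        this.value = value;
        this.chainLength = (parent == null) ? 1 : parent.chainLength + 1;
    }

    SimpleContext addData(Object key, Object value) {
        return new SimpleContext(this, key, value);
    }

    Object getKey() {
        return key;
    }

    Object getValue() {
        return value;
    }

    // Fill the array back-to-front while walking the parent links once, so the result is
    // ordered oldest-first without pushing anything onto a temporary Stack.
    SimpleContext[] getContextChain() {
        SimpleContext[] chain = new SimpleContext[chainLength];
        SimpleContext current = this;
        for (int i = chainLength - 1; i >= 0 && current != null; i--) {
            chain[i] = current;
            current = current.parent;
        }
        return chain;
    }

    // Merge written against the chain, matching the shape of mergeContexts after the change.
    static SimpleContext merge(SimpleContext into, SimpleContext from) {
        SimpleContext result = into;
        for (SimpleContext entry : from.getContextChain()) {
            if (entry != null) {
                result = result.addData(entry.getKey(), entry.getValue());
            }
        }
        return result;
    }
}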
Nice internal commentary!
public Object invoke(Object proxy, final Method method, Object[] args) { validateResumeOperationIsNotPresent(method); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); context = mergeRequestOptionsContext(context, options); context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (options != null) { options.getRequestCallback().accept(request); } if (request.getBody() != null) { request.setBody(validateLength(request)); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }
public Object invoke(Object proxy, final Method method, Object[] args) { validateResumeOperationIsNotPresent(method); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); context = mergeRequestOptionsContext(context, options); context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (options != null) { options.getRequestCallback().accept(request); } if (request.getBody() != null) { request.setBody(validateLength(request)); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Context mergeRequestOptionsContext(Context context, RequestOptions options) { if (options == null) { return context; } Context optionsContext = options.getContext(); if (optionsContext != null && optionsContext != Context.NONE) { for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) { context = context.addData(kvp.getKey(), kvp.getValue()); } } return context; } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && options.getErrorOptions().contains(ErrorOptions.NO_THROW))) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override @SuppressWarnings("deprecation") void validateResumeOperationIsNotPresent(Method method) { if (method.isAnnotationPresent(com.azure.core.annotation.ResumeOperation.class)) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("'ResumeOperation' isn't supported."))); } } static Context mergeRequestOptionsContext(Context context, RequestOptions options) { if (options == null) { return context; } Context optionsContext = options.getContext(); if (optionsContext != null && optionsContext != Context.NONE) { for (Map.Entry<Object, Object> kvp : optionsContext.getValues().entrySet()) { context = context.addData(kvp.getKey(), kvp.getValue()); } } return context; } static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()), stream); 
request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } private static Exception instantiateUnexpectedException(final UnexpectedExceptionInformation exception, final HttpResponse httpResponse, final byte[] responseContent, final Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); final String contentType = httpResponse.getHeaderValue("Content-Type"); final String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent == null || responseContent.length == 0 ? "(empty body)" : "\"" + new String(responseContent, StandardCharsets.UTF_8) + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType() .getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && options.getErrorOptions().contains(ErrorOptions.NO_THROW))) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { return monoError(logger, new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } } final Class<? 
extends Response<?>> clsFinal = cls; return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(clsFinal)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + clsFinal)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
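For orientation, here is a minimal, hypothetical usage sketch of the `create` overloads shown above. The `ExampleService` interface, its annotation values, and the endpoint are invented for illustration; only the `RestProxy.create` factory itself comes from the code above.

```java
import com.azure.core.annotation.Get;
import com.azure.core.annotation.Host;
import com.azure.core.annotation.ServiceInterface;
import com.azure.core.http.rest.Response;
import reactor.core.publisher.Mono;

public final class RestProxyUsageSketch {
    // Hypothetical Swagger-style interface; the host and path values are placeholders.
    @Host("https://example.invalid")
    @ServiceInterface(name = "ExampleService")
    interface ExampleService {
        @Get("status")
        Mono<Response<Void>> getStatus();
    }

    public static void main(String[] args) {
        // Build a proxy using the default pipeline and serializer (the no-argument overload above),
        // then invoke the interface method like any other Java method.
        ExampleService service = RestProxy.create(ExampleService.class);
        service.getStatus().block();
    }
}
```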
We should add a similar validation for Storage as well, to ensure that only one of RetryOptions and RequestRetryOptions is set, as we do in other builders for policies and options.
public BlobServiceClientBuilder retryOptions(RetryOptions retryOptions) { Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this.retryOptions(RequestRetryOptions.fromRetryOptions(retryOptions, null, null)); }
return this.retryOptions(RequestRetryOptions.fromRetryOptions(retryOptions, null, null));
public BlobServiceClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; }
class BlobServiceClientBuilder implements TokenCredentialTrait<BlobServiceClientBuilder>, ConnectionStringTrait<BlobServiceClientBuilder>, AzureNamedKeyCredentialTrait<BlobServiceClientBuilder>, AzureSasCredentialTrait<BlobServiceClientBuilder>, HttpTrait<BlobServiceClientBuilder>, ConfigurationTrait<BlobServiceClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobServiceClientBuilder.class); private String endpoint; private String accountName; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; private BlobContainerEncryptionScope blobContainerEncryptionScope; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private BlobServiceVersion version; /** * Creates a builder instance that is able to configure and construct {@link BlobServiceClient BlobServiceClients} * and {@link BlobServiceAsyncClient BlobServiceAsyncClients}. */ public BlobServiceClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * @return a {@link BlobServiceClient} created from the configurations in this builder. * @throws IllegalArgumentException If no credentials are provided. * @throws IllegalStateException If multiple credentials have been specified. */ public BlobServiceClient buildClient() { return new BlobServiceClient(buildAsyncClient()); } /** * @return a {@link BlobServiceAsyncClient} created from the configurations in this builder. * @throws IllegalArgumentException If no credentials are provided. * @throws IllegalStateException If multiple credentials have been specified. */ public BlobServiceAsyncClient buildAsyncClient() { BuilderHelper.httpsValidation(customerProvidedKey, "customer provided key", endpoint, logger); boolean anonymousAccess = false; if (Objects.nonNull(customerProvidedKey) && Objects.nonNull(encryptionScope)) { throw logger.logExceptionAsError(new IllegalArgumentException("Customer provided key and encryption " + "scope cannot both be set")); } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); HttpPipeline pipeline = (httpPipeline != null) ? 
httpPipeline : BuilderHelper.buildPipeline( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, endpoint, retryOptions, logOptions, clientOptions, httpClient, perCallPolicies, perRetryPolicies, configuration, logger); boolean foundCredential = false; for (int i = 0; i < pipeline.getPolicyCount(); i++) { if (pipeline.getPolicy(i) instanceof StorageSharedKeyCredentialPolicy) { foundCredential = true; break; } if (pipeline.getPolicy(i) instanceof BearerTokenAuthenticationPolicy) { foundCredential = true; break; } if (pipeline.getPolicy(i) instanceof AzureSasCredentialPolicy) { foundCredential = true; break; } } anonymousAccess = !foundCredential; return new BlobServiceAsyncClient(pipeline, endpoint, serviceVersion, accountName, customerProvidedKey, encryptionScope, blobContainerEncryptionScope, anonymousAccess); } /** * Sets the blob service endpoint, additionally parses it for information (SAS token) * * @param endpoint URL of the service * @return the updated BlobServiceClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ public BlobServiceClientBuilder endpoint(String endpoint) { try { BlobUrlParts parts = BlobUrlParts.parse(new URL(endpoint)); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage endpoint url is malformed.")); } return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey Customer provided key containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Sets the {@link BlobContainerEncryptionScope encryption scope} that is used to determine how blob contents are * encrypted on the server. * * @param blobContainerEncryptionScope Encryption scope containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder blobContainerEncryptionScope( BlobContainerEncryptionScope blobContainerEncryptionScope) { this.blobContainerEncryptionScope = blobContainerEncryptionScope; return this; } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. 
* @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public BlobServiceClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential {@link TokenCredential}. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public BlobServiceClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public BlobServiceClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public BlobServiceClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated BlobServiceClientBuilder * @throws IllegalArgumentException If {@code connectionString} in invalid. * @throws NullPointerException If {@code connectionString} is {@code null}. 
*/ @Override public BlobServiceClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. * @return the updated BlobServiceClientBuilder object */ @Override public BlobServiceClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. The policy will be added after the retry policy. If * the method is called multiple times, all policies will be added and their order preserved. * * @param pipelinePolicy a pipeline policy * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ @Override public BlobServiceClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"); if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(pipelinePolicy); } else { perRetryPolicies.add(pipelinePolicy); } return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ @Override public BlobServiceClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage allowlist log headers and query parameters. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated BlobServiceClientBuilder object */ @Override public BlobServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions {@link RequestRetryOptions}. 
* @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. */ public BlobServiceClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the request retry options for all the requests made through the client. * * Consider using {@link * * @param retryOptions {@link RetryOptions}. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. */ @Override /** * Sets the client options for all the requests made through the client. * * @param clientOptions {@link ClientOptions}. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ public BlobServiceClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated BlobServiceClientBuilder object */ @Override public BlobServiceClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
class BlobServiceClientBuilder implements TokenCredentialTrait<BlobServiceClientBuilder>, ConnectionStringTrait<BlobServiceClientBuilder>, AzureNamedKeyCredentialTrait<BlobServiceClientBuilder>, AzureSasCredentialTrait<BlobServiceClientBuilder>, HttpTrait<BlobServiceClientBuilder>, ConfigurationTrait<BlobServiceClientBuilder>, EndpointTrait<BlobServiceClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobServiceClientBuilder.class); private String endpoint; private String accountName; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; private BlobContainerEncryptionScope blobContainerEncryptionScope; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private BlobServiceVersion version; /** * Creates a builder instance that is able to configure and construct {@link BlobServiceClient BlobServiceClients} * and {@link BlobServiceAsyncClient BlobServiceAsyncClients}. */ public BlobServiceClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * @return a {@link BlobServiceClient} created from the configurations in this builder. * @throws IllegalArgumentException If no credentials are provided. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public BlobServiceClient buildClient() { return new BlobServiceClient(buildAsyncClient()); } /** * @return a {@link BlobServiceAsyncClient} created from the configurations in this builder. * @throws IllegalArgumentException If no credentials are provided. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public BlobServiceAsyncClient buildAsyncClient() { BuilderHelper.httpsValidation(customerProvidedKey, "customer provided key", endpoint, logger); boolean anonymousAccess = false; if (Objects.nonNull(customerProvidedKey) && Objects.nonNull(encryptionScope)) { throw logger.logExceptionAsError(new IllegalArgumentException("Customer provided key and encryption " + "scope cannot both be set")); } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); HttpPipeline pipeline = (httpPipeline != null) ? 
httpPipeline : BuilderHelper.buildPipeline( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, endpoint, retryOptions, coreRetryOptions, logOptions, clientOptions, httpClient, perCallPolicies, perRetryPolicies, configuration, logger); boolean foundCredential = false; for (int i = 0; i < pipeline.getPolicyCount(); i++) { if (pipeline.getPolicy(i) instanceof StorageSharedKeyCredentialPolicy) { foundCredential = true; break; } if (pipeline.getPolicy(i) instanceof BearerTokenAuthenticationPolicy) { foundCredential = true; break; } if (pipeline.getPolicy(i) instanceof AzureSasCredentialPolicy) { foundCredential = true; break; } } anonymousAccess = !foundCredential; return new BlobServiceAsyncClient(pipeline, endpoint, serviceVersion, accountName, customerProvidedKey, encryptionScope, blobContainerEncryptionScope, anonymousAccess); } /** * Sets the blob service endpoint, additionally parses it for information (SAS token) * * @param endpoint URL of the service * @return the updated BlobServiceClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public BlobServiceClientBuilder endpoint(String endpoint) { try { BlobUrlParts parts = BlobUrlParts.parse(new URL(endpoint)); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage endpoint url is malformed.")); } return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey Customer provided key containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Sets the {@link BlobContainerEncryptionScope encryption scope} that is used to determine how blob contents are * encrypted on the server. * * @param blobContainerEncryptionScope Encryption scope containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder blobContainerEncryptionScope( BlobContainerEncryptionScope blobContainerEncryptionScope) { this.blobContainerEncryptionScope = blobContainerEncryptionScope; return this; } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. 
* @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public BlobServiceClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public BlobServiceClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public BlobServiceClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public BlobServiceClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated BlobServiceClientBuilder * @throws IllegalArgumentException If {@code connectionString} in invalid. * @throws NullPointerException If {@code connectionString} is {@code null}. 
*/ @Override public BlobServiceClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. * @return the updated BlobServiceClientBuilder object */ @Override public BlobServiceClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
*/ @Override public BlobServiceClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"); if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(pipelinePolicy); } else { perRetryPolicies.add(pipelinePolicy); } return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to * and from the service. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ @Override public BlobServiceClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage allowlist log headers and query parameters. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated BlobServiceClientBuilder object */ @Override public BlobServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * Setting this is mutually exclusive with using {@link * * @param retryOptions {@link RequestRetryOptions}. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * Consider using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * @return the updated BlobServiceClientBuilder object */ @Override /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public BlobServiceClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * * @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses. * @return the updated BlobServiceClientBuilder object */ @Override public BlobServiceClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. 
If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
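A minimal sketch of the validation the comment asks for, under the assumption that it would run in `buildAsyncClient()` before the pipeline is constructed. The helper name and its placement are hypothetical; Storage's `BuilderHelper` may expose this differently.

```java
// Hypothetical helper: fail fast when both retry-options flavors have been configured.
static void validateSingleRetryOptionsIsSet(RequestRetryOptions storageRetryOptions,
    RetryOptions coreRetryOptions, ClientLogger logger) {
    if (storageRetryOptions != null && coreRetryOptions != null) {
        throw logger.logExceptionAsError(new IllegalStateException(
            "Only one of 'retryOptions(RequestRetryOptions)' and 'retryOptions(RetryOptions)' may be set."));
    }
}

// Hypothetical call site inside buildAsyncClient(), before BuilderHelper.buildPipeline(...):
// validateSingleRetryOptionsIsSet(retryOptions, coreRetryOptions, logger);
```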
I assume these two properties are not relevant to the resource manager?
void testAzureProfileWithAzureChina() { this.contextRunner .withUserConfiguration(AzureGlobalPropertiesAutoConfiguration.class) .withBean(AzureResourceManager.class, AzureResourceManagerExt::getAzureResourceManager) .withPropertyValues( "spring.cloud.azure.profile.tenant-id=test-tenant-id", "spring.cloud.azure.profile.subscription-id=test-subscription-id", "spring.cloud.azure.profile.cloud=AZURE_CHINA" ) .run(context -> { assertThat(context).hasSingleBean(AzureProfile.class); AzureProfile azureProfile = context.getBean(AzureProfile.class); Assertions.assertEquals(azureProfile.getEnvironment().getActiveDirectoryEndpoint(), AZURE_CHINA.getActiveDirectoryEndpoint()); }); }
Assertions.assertEquals(azureProfile.getEnvironment().getActiveDirectoryEndpoint(),
void testAzureProfileWithAzureChina() { this.contextRunner .withUserConfiguration(AzureGlobalPropertiesAutoConfiguration.class) .withBean(AzureResourceManager.class, TestAzureResourceManager::getAzureResourceManager) .withPropertyValues( "spring.cloud.azure.profile.tenant-id=test-tenant-id", "spring.cloud.azure.profile.subscription-id=test-subscription-id", "spring.cloud.azure.profile.cloud=AZURE_CHINA" ) .run(context -> { assertThat(context).hasSingleBean(AzureProfile.class); AzureProfile azureProfile = context.getBean(AzureProfile.class); Assertions.assertEquals(azureProfile.getEnvironment().getActiveDirectoryEndpoint(), AZURE_CHINA.getActiveDirectoryEndpoint()); }); }
class AzureResourceManagerAutoConfigurationTest { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureResourceManagerAutoConfiguration.class)); @Test void testAzureResourceManagerDisabled() { this.contextRunner .withPropertyValues("spring.cloud.azure.resourcemanager.enabled=false") .run(context -> { assertThat(context).doesNotHaveBean(AzureResourceManager.class); assertThat(context).doesNotHaveBean(AzureProfile.class); }); } @Test void configureWithoutTenantId() { this.contextRunner .withPropertyValues("spring.cloud.azure.resourcemanager.enabled=true") .run(context -> { assertThat(context).doesNotHaveBean(AzureResourceManager.class); assertThat(context).doesNotHaveBean(AzureProfile.class); }); } @Test void configureWithTenantId() { this.contextRunner .withPropertyValues("spring.cloud.azure.profile.tenant-id=test-tenant") .run(context -> { assertThat(context).doesNotHaveBean(AzureResourceManager.class); assertThat(context).doesNotHaveBean(AzureProfile.class); }); } @Test void testWithoutAzureResourceManagerClass() { this.contextRunner.withClassLoader(new FilteredClassLoader(AzureResourceManager.class)) .run(context -> assertThat(context).doesNotHaveBean(AzureProfile.class)); } @Test void testWithoutAzureResourceMetadataClass() { this.contextRunner.withClassLoader(new FilteredClassLoader(AzureResourceMetadata.class)) .run(context -> assertThat(context).doesNotHaveBean(AzureProfile.class)); } @Test void testAzureProfileWithAzureDefault() { this.contextRunner .withUserConfiguration(AzureGlobalPropertiesAutoConfiguration.class) .withBean(AzureResourceManager.class, AzureResourceManagerExt::getAzureResourceManager) .withPropertyValues( "spring.cloud.azure.profile.tenant-id=test-tenant-id", "spring.cloud.azure.profile.subscription-id=test-subscription-id" ) .run(context -> { assertThat(context).hasSingleBean(AzureProfile.class); AzureProfile azureProfile = context.getBean(AzureProfile.class); Assertions.assertEquals(azureProfile.getEnvironment().getActiveDirectoryEndpoint(), AZURE.getActiveDirectoryEndpoint()); }); } @Test static class AzureResourceManagerExt { static AzureResourceManager getAzureResourceManager() { return mock(AzureResourceManager.class); } } }
class AzureResourceManagerAutoConfigurationTest { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureResourceManagerAutoConfiguration.class)); @Test void testAzureResourceManagerDisabled() { this.contextRunner .withPropertyValues("spring.cloud.azure.resourcemanager.enabled=false") .run(context -> { assertThat(context).doesNotHaveBean(AzureResourceManager.class); assertThat(context).doesNotHaveBean(AzureProfile.class); }); } @Test void configureWithoutTenantId() { this.contextRunner .withPropertyValues("spring.cloud.azure.resourcemanager.enabled=true") .run(context -> { assertThat(context).doesNotHaveBean(AzureResourceManager.class); assertThat(context).doesNotHaveBean(AzureProfile.class); }); } @Test void configureWithTenantId() { this.contextRunner .withPropertyValues("spring.cloud.azure.profile.tenant-id=test-tenant") .run(context -> { assertThat(context).doesNotHaveBean(AzureResourceManager.class); assertThat(context).doesNotHaveBean(AzureProfile.class); }); } @Test void testWithoutAzureResourceManagerClass() { this.contextRunner.withClassLoader(new FilteredClassLoader(AzureResourceManager.class)) .run(context -> assertThat(context).doesNotHaveBean(AzureProfile.class)); } @Test void testWithoutAzureResourceMetadataClass() { this.contextRunner.withClassLoader(new FilteredClassLoader(AzureResourceMetadata.class)) .run(context -> assertThat(context).doesNotHaveBean(AzureProfile.class)); } @Test void testAzureProfileWithAzureDefault() { this.contextRunner .withUserConfiguration(AzureGlobalPropertiesAutoConfiguration.class) .withBean(AzureResourceManager.class, TestAzureResourceManager::getAzureResourceManager) .withPropertyValues( "spring.cloud.azure.profile.tenant-id=test-tenant-id", "spring.cloud.azure.profile.subscription-id=test-subscription-id" ) .run(context -> { assertThat(context).hasSingleBean(AzureProfile.class); AzureProfile azureProfile = context.getBean(AzureProfile.class); Assertions.assertEquals(azureProfile.getEnvironment().getActiveDirectoryEndpoint(), AZURE.getActiveDirectoryEndpoint()); }); } @Test }
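For reference, a small sketch of how the profile properties in these tests feed into an `AzureProfile`, assuming the management-library constructor `AzureProfile(String tenantId, String subscriptionId, AzureEnvironment environment)`; whether the tenant and subscription values actually matter to the resource manager here is exactly what the comment is asking.

```java
import com.azure.core.management.AzureEnvironment;
import com.azure.core.management.profile.AzureProfile;

public final class AzureProfileSketch {
    public static void main(String[] args) {
        // spring.cloud.azure.profile.cloud=AZURE_CHINA selects the environment whose
        // endpoints (for example, Active Directory) the tests assert on.
        AzureProfile profile = new AzureProfile(
            "test-tenant-id", "test-subscription-id", AzureEnvironment.AZURE_CHINA);
        System.out.println(profile.getEnvironment().getActiveDirectoryEndpoint());
    }
}
```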
Should we encode `valueStr` to ensure we produce valid JSON? If `valueStr` contains a `"`, it will result in invalid JSON.
public StringBuilder writeKeyAndValue(StringBuilder formatter) { formatter.append("\"") .append(key) .append("\"") .append(":"); String valueStr = null; if (value != null) { if (!(value instanceof String)) { return formatter.append(value); } valueStr = (String) value; } else if (valueSupplier != null) { valueStr = valueSupplier.get(); } if (valueStr == null) { return formatter.append("null"); } return formatter.append("\"") .append(valueStr) .append("\""); }
.append(valueStr)
public StringBuilder writeKeyAndValue(StringBuilder formatter) { formatter.append("\""); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); String valueStr = null; if (value != null) { if (!(value instanceof String)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); return formatter; } valueStr = (String) value; } else if (valueSupplier != null) { valueStr = valueSupplier.get(); } if (valueStr == null) { return formatter.append("null"); } formatter.append("\""); JSON_STRING_ENCODER.quoteAsString(valueStr, formatter); return formatter.append("\""); }
class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes {"key":"value"} json string to provided StringBuilder. */ }
class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ }
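A minimal sketch of the escaping that the revised `writeKeyAndValue` relies on, assuming Jackson's `JsonStringEncoder` backs the `JSON_STRING_ENCODER` constant used above (the import and instantiation here are an assumption, not copied from the source):

```java
import com.fasterxml.jackson.core.io.JsonStringEncoder;

public final class JsonEscapeSketch {
    // Assumed to be the encoder behind the JSON_STRING_ENCODER constant in the snippet above.
    private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance();

    public static void main(String[] args) {
        StringBuilder formatter = new StringBuilder("\"");
        // quoteAsString escapes characters such as '"' and '\' so the output remains valid JSON.
        JSON_STRING_ENCODER.quoteAsString("value with \"quotes\"", formatter);
        formatter.append("\"");
        System.out.println(formatter); // prints "value with \"quotes\""
    }
}
```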
Will this result in `long` and `bool` values being formatted as strings, i.e. `"key": "false"` instead of `"key": false`?
public StringBuilder writeKeyAndValue(StringBuilder formatter) { formatter.append("\""); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); String valueStr = null; if (value != null) { if (!(value instanceof String)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); return formatter; } valueStr = (String) value; } else if (valueSupplier != null) { valueStr = valueSupplier.get(); } if (valueStr == null) { return formatter.append("null"); } formatter.append("\""); JSON_STRING_ENCODER.quoteAsString(valueStr, formatter); return formatter.append("\""); }
JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter);
public StringBuilder writeKeyAndValue(StringBuilder formatter) { formatter.append("\""); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); String valueStr = null; if (value != null) { if (!(value instanceof String)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); return formatter; } valueStr = (String) value; } else if (valueSupplier != null) { valueStr = valueSupplier.get(); } if (valueStr == null) { return formatter.append("null"); } formatter.append("\""); JSON_STRING_ENCODER.quoteAsString(valueStr, formatter); return formatter.append("\""); }
class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes {"key":"value"} json string to provided StringBuilder. */ }
class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ }
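A hedged sketch of the refinement the comment points at: numbers and booleans could be appended unquoted so their JSON types survive, while everything else keeps the quoted-and-escaped path. This is a hypothetical variant of `writeKeyAndValue` that slots into the class above, not the change that was actually made.

```java
// Hypothetical variant: preserve JSON number/boolean types instead of quoting them as strings.
public StringBuilder writeKeyAndValue(StringBuilder formatter) {
    formatter.append("\"");
    JSON_STRING_ENCODER.quoteAsString(key, formatter);
    formatter.append("\":");

    Object resolved = (value != null) ? value : (valueSupplier == null ? null : valueSupplier.get());
    if (resolved == null) {
        return formatter.append("null");
    }
    if (resolved instanceof Number || resolved instanceof Boolean) {
        return formatter.append(resolved); // e.g. "key":false or "key":42, no quotes
    }

    formatter.append("\"");
    JSON_STRING_ENCODER.quoteAsString(resolved.toString(), formatter); // quoted and escaped
    return formatter.append("\"");
}
```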
I thought we were going to add standalone samples to showcase this? If we add it here, I think there could be confusion for our data-gathering purposes about whether the customer was simply looking at a sample of how to analyze layout versus looking for how to use the spans to find related elements.
public static void main(final String[] args) throws IOException { DocumentAnalysisClient client = new DocumentAnalysisClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); File selectionMarkDocument = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/resources/" + "sample-forms/forms/selectionMarkForm.pdf"); byte[] fileContent = Files.readAllBytes(selectionMarkDocument.toPath()); InputStream fileStream = new ByteArrayInputStream(fileContent); SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeLayoutResultPoller = client.beginAnalyzeDocument("prebuilt-layout", fileStream, selectionMarkDocument.length()); AnalyzeResult analyzeLayoutResult = analyzeLayoutResultPoller.getFinalResult(); analyzeLayoutResult.getPages().forEach(documentPage -> { System.out.printf("Page has width: %.2f and height: %.2f, measured with unit: %s%n", documentPage.getWidth(), documentPage.getHeight(), documentPage.getUnit()); documentPage.getLines().forEach(documentLine -> { System.out.printf("Line '%s' is within a bounding box %s.%n", documentLine.getContent(), documentLine.getBoundingBox().toString()); List<DocumentWord> containedWords = getWordsInALine(documentLine, documentPage.getWords()); System.out.printf("Total number of words in the line: %d.%n", containedWords.size()); System.out.printf("Words contained in the line are: %s.%n", containedWords.stream().map(DocumentWord::getContent).collect(Collectors.toList())); }); documentPage.getWords().forEach(documentWord -> System.out.printf("Word '%s' has a confidence score of %.2f.%n", documentWord.getContent(), documentWord.getConfidence())); documentPage.getSelectionMarks().forEach(documentSelectionMark -> System.out.printf("Selection mark is '%s' and is within a bounding box %s with confidence %.2f.%n", documentSelectionMark.getState().toString(), documentSelectionMark.getBoundingBox().toString(), documentSelectionMark.getConfidence())); }); List<DocumentTable> tables = analyzeLayoutResult.getTables(); for (int i = 0; i < tables.size(); i++) { DocumentTable documentTable = tables.get(i); System.out.printf("Table %d has %d rows and %d columns.%n", i, documentTable.getRowCount(), documentTable.getColumnCount()); documentTable.getCells().forEach(documentTableCell -> { System.out.printf("Cell '%s', has row index %d and column index %d.%n", documentTableCell.getContent(), documentTableCell.getRowIndex(), documentTableCell.getColumnIndex()); }); System.out.println(); } analyzeLayoutResult.getStyles().forEach(documentStyle -> System.out.printf("Document is handwritten %s.%n", documentStyle.isHandwritten())); }
List<DocumentWord> containedWords = getWordsInALine(documentLine, documentPage.getWords());
public static void main(final String[] args) throws IOException { DocumentAnalysisClient client = new DocumentAnalysisClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); File selectionMarkDocument = new File("../formrecognizer/azure-ai-formrecognizer/src/samples/resources/" + "sample-forms/forms/selectionMarkForm.pdf"); byte[] fileContent = Files.readAllBytes(selectionMarkDocument.toPath()); InputStream fileStream = new ByteArrayInputStream(fileContent); SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeLayoutResultPoller = client.beginAnalyzeDocument("prebuilt-layout", fileStream, selectionMarkDocument.length()); AnalyzeResult analyzeLayoutResult = analyzeLayoutResultPoller.getFinalResult(); analyzeLayoutResult.getPages().forEach(documentPage -> { System.out.printf("Page has width: %.2f and height: %.2f, measured with unit: %s%n", documentPage.getWidth(), documentPage.getHeight(), documentPage.getUnit()); documentPage.getLines().forEach(documentLine -> System.out.printf("Line '%s; is within a bounding box %s.%n", documentLine.getContent(), documentLine.getBoundingBox().toString())); documentPage.getWords().forEach(documentWord -> System.out.printf("Word '%s' has a confidence score of %.2f%n.", documentWord.getContent(), documentWord.getConfidence())); documentPage.getSelectionMarks().forEach(documentSelectionMark -> System.out.printf("Selection mark is '%s' and is within a bounding box %s with confidence %.2f.%n", documentSelectionMark.getState().toString(), documentSelectionMark.getBoundingBox().toString(), documentSelectionMark.getConfidence())); }); List<DocumentTable> tables = analyzeLayoutResult.getTables(); for (int i = 0; i < tables.size(); i++) { DocumentTable documentTable = tables.get(i); System.out.printf("Table %d has %d rows and %d columns.%n", i, documentTable.getRowCount(), documentTable.getColumnCount()); documentTable.getCells().forEach(documentTableCell -> { System.out.printf("Cell '%s', has row index %d and column index %d.%n", documentTableCell.getContent(), documentTableCell.getRowIndex(), documentTableCell.getColumnIndex()); }); System.out.println(); } analyzeLayoutResult.getStyles().forEach(documentStyle -> System.out.printf("Document is handwritten %s%n.", documentStyle.isHandwritten())); }
class AnalyzeLayout { /** * Main method to invoke this demo. * * @param args Unused. Arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ private static List<DocumentWord> getWordsInALine(DocumentLine documentLine, List<DocumentWord> pageWords) { List<DocumentWord> containedWords = new ArrayList<>(); pageWords.forEach(documentWord -> { documentLine.getSpans().forEach(documentSpan -> { if ((documentWord.getSpan().getOffset() >= documentSpan.getOffset()) && ((documentWord.getSpan().getOffset() + documentWord.getSpan().getLength()) <= (documentSpan.getOffset() + documentSpan.getLength()))) { containedWords.add(documentWord); } }); }); return containedWords; } }
class AnalyzeLayout { /** * Main method to invoke this demo. * * @param args Unused. Arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ }
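Toward the standalone sample the comment asks for, a sketch of the span-containment check on its own. The method name and the `DocumentSpan` type name are assumptions used for illustration; the containment logic mirrors the `getWordsInALine` helper shown above (assumed imports: `java.util.List`, `java.util.ArrayList`, and the Form Recognizer document model types).

```java
// Placeholder standalone sample: relate words to a line purely through their spans.
// A word is considered part of a line when its span lies entirely within one of the line's spans.
static List<DocumentWord> wordsWithinLine(DocumentLine line, List<DocumentWord> pageWords) {
    List<DocumentWord> contained = new ArrayList<>();
    for (DocumentWord word : pageWords) {
        int wordStart = word.getSpan().getOffset();
        int wordEnd = wordStart + word.getSpan().getLength();
        for (DocumentSpan lineSpan : line.getSpans()) {
            if (wordStart >= lineSpan.getOffset()
                && wordEnd <= lineSpan.getOffset() + lineSpan.getLength()) {
                contained.add(word);
                break;
            }
        }
    }
    return contained;
}
```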
Format nit: fix the misplaced semicolon after `create()` and the missing space before `{`.
public void canRedisVersionUpdate(){ RedisCache.MajorVersion redisVersion = RedisCache.MajorVersion.V4; RedisCache redisCache = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku() .withRedisVersion(redisVersion) .create() ; Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); redisVersion = RedisCache.MajorVersion.V6; redisCache = redisCache.update() .withRedisVersion(redisVersion) .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(300)); redisCache = redisCache.refresh(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); }
;
public void canRedisVersionUpdate() { RedisCache.RedisVersion redisVersion = RedisCache.RedisVersion.V4; RedisCache redisCache = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku() .withRedisVersion(redisVersion) .create(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); redisVersion = RedisCache.RedisVersion.V6; redisCache = redisCache.update() .withRedisVersion(redisVersion) .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(300)); redisCache = redisCache.refresh(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); }
class RedisCacheOperationsTests extends RedisManagementTest { @Test @SuppressWarnings("unchecked") public void canCRUDRedisCache() throws Exception { Creatable<ResourceGroup> resourceGroups = resourceManager.resourceGroups().define(rgNameSecond).withRegion(Region.US_CENTRAL); Creatable<RedisCache> redisCacheDefinition1 = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku(); Creatable<RedisCache> redisCacheDefinition2 = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku() .withShardCount(2) .withPatchSchedule(DayOfWeek.SUNDAY, 10, Duration.ofMinutes(302)); Creatable<RedisCache> redisCacheDefinition3 = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku(2) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40"); CreatedResources<RedisCache> batchRedisCaches = redisManager.redisCaches().create(redisCacheDefinition1, redisCacheDefinition2, redisCacheDefinition3); RedisCache redisCache = batchRedisCaches.get(redisCacheDefinition1.key()); RedisCache redisCachePremium = batchRedisCaches.get(redisCacheDefinition3.key()); Assertions.assertEquals(rgName, redisCache.resourceGroupName()); Assertions.assertEquals(SkuName.BASIC, redisCache.sku().name()); RedisCachePremium premiumCache = redisCachePremium.asPremium(); Assertions.assertEquals(SkuFamily.P, premiumCache.sku().family()); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule1")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); premiumCache .update() .withRedisConfiguration("maxclients", "3") .withoutFirewallRule("rule1") .withFirewallRule("rule3", "192.168.0.10", "192.168.0.104") .withoutMinimumTlsVersion() .apply(); Thread.sleep(10000); premiumCache.refresh(); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule3")); Assertions.assertFalse(premiumCache.firewallRules().containsKey("rule1")); premiumCache.update().withoutRedisConfiguration("maxclients").apply(); premiumCache.update().withoutRedisConfiguration().apply(); Assertions.assertEquals(0, premiumCache.patchSchedules().size()); premiumCache.update().withPatchSchedule(DayOfWeek.MONDAY, 1).withPatchSchedule(DayOfWeek.TUESDAY, 5).apply(); Assertions.assertEquals(2, premiumCache.patchSchedules().size()); premiumCache.forceReboot(RebootType.ALL_NODES); List<ScheduleEntry> patchSchedule = premiumCache.listPatchSchedules(); Assertions.assertEquals(2, patchSchedule.size()); premiumCache.deletePatchSchedule(); patchSchedule = redisManager.redisCaches().getById(premiumCache.id()).asPremium().listPatchSchedules(); Assertions.assertNull(patchSchedule); List<RedisCache> redisCaches = redisManager.redisCaches().listByResourceGroup(rgName).stream().collect(Collectors.toList()); boolean found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertEquals(1, redisCaches.size()); redisCaches = redisManager.redisCaches().list().stream().collect(Collectors.toList()); found = false; for 
(RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertTrue(redisCaches.size() >= 3); RedisCache redisCacheGet = redisManager.redisCaches().getByResourceGroup(rgName, rrName); Assertions.assertNotNull(redisCacheGet); Assertions.assertEquals(redisCache.id(), redisCacheGet.id()); Assertions.assertEquals(redisCache.provisioningState(), redisCacheGet.provisioningState()); RedisAccessKeys redisKeys = redisCache.keys(); Assertions.assertNotNull(redisKeys); Assertions.assertNotNull(redisKeys.primaryKey()); Assertions.assertNotNull(redisKeys.secondaryKey()); RedisAccessKeys oldKeys = redisCache.refreshKeys(); RedisAccessKeys updatedPrimaryKey = redisCache.regenerateKey(RedisKeyType.PRIMARY); RedisAccessKeys updatedSecondaryKey = redisCache.regenerateKey(RedisKeyType.SECONDARY); Assertions.assertNotNull(oldKeys); Assertions.assertNotNull(updatedPrimaryKey); Assertions.assertNotNull(updatedSecondaryKey); if (!isPlaybackMode()) { Assertions.assertNotEquals(oldKeys.primaryKey(), updatedPrimaryKey.primaryKey()); Assertions.assertEquals(oldKeys.secondaryKey(), updatedPrimaryKey.secondaryKey()); Assertions.assertNotEquals(oldKeys.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertNotEquals(updatedPrimaryKey.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertEquals(updatedPrimaryKey.primaryKey(), updatedSecondaryKey.primaryKey()); } redisCache = redisCache.update().withStandardSku().apply(); Assertions.assertEquals(SkuName.STANDARD, redisCache.sku().name()); Assertions.assertEquals(SkuFamily.C, redisCache.sku().family()); try { redisCache.update().withBasicSku(1).apply(); Assertions.fail(); } catch (ManagementException e) { } redisCache.refresh(); redisManager.redisCaches().deleteById(redisCache.id()); /*premiumCache.exportData(storageAccount.name(),"snapshot1"); premiumCache.importData(Arrays.asList("snapshot1"));*/ } @Test @Test public void canCRUDLinkedServers() throws Exception { RedisCache rgg = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(rgNameSecond) .withPremiumSku(2) .withPatchSchedule(DayOfWeek.SATURDAY, 5, Duration.ofHours(5)) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40") .create(); RedisCache rggLinked = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgNameSecond) .withPremiumSku(2) .create(); Assertions.assertNotNull(rgg); Assertions.assertNotNull(rggLinked); RedisCachePremium premiumRgg = rgg.asPremium(); String llName = premiumRgg.addLinkedServer(rggLinked.id(), rggLinked.regionName(), ReplicationRole.PRIMARY); Assertions.assertEquals(ResourceUtils.nameFromResourceId(rggLinked.id()), llName); Map<String, ReplicationRole> linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(1, linkedServers.size()); Assertions.assertTrue(linkedServers.keySet().contains(llName)); Assertions.assertEquals(ReplicationRole.PRIMARY, linkedServers.get(llName)); ReplicationRole repRole = premiumRgg.getLinkedServerRole(llName); Assertions.assertEquals(ReplicationRole.PRIMARY, repRole); premiumRgg.removeLinkedServer(llName); rgg.update().withoutPatchSchedule().apply(); rggLinked.update().withFirewallRule("rulesmhule", "192.168.1.10", "192.168.1.20").apply(); linkedServers = premiumRgg.listLinkedServers(); 
Assertions.assertEquals(0, linkedServers.size()); } }
class RedisCacheOperationsTests extends RedisManagementTest { @Test @SuppressWarnings("unchecked") public void canCRUDRedisCache() throws Exception { Creatable<ResourceGroup> resourceGroups = resourceManager.resourceGroups().define(rgNameSecond).withRegion(Region.US_CENTRAL); Creatable<RedisCache> redisCacheDefinition1 = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku(); Creatable<RedisCache> redisCacheDefinition2 = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku() .withShardCount(2) .withPatchSchedule(DayOfWeek.SUNDAY, 10, Duration.ofMinutes(302)); Creatable<RedisCache> redisCacheDefinition3 = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku(2) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40"); CreatedResources<RedisCache> batchRedisCaches = redisManager.redisCaches().create(redisCacheDefinition1, redisCacheDefinition2, redisCacheDefinition3); RedisCache redisCache = batchRedisCaches.get(redisCacheDefinition1.key()); RedisCache redisCachePremium = batchRedisCaches.get(redisCacheDefinition3.key()); Assertions.assertEquals(rgName, redisCache.resourceGroupName()); Assertions.assertEquals(SkuName.BASIC, redisCache.sku().name()); RedisCachePremium premiumCache = redisCachePremium.asPremium(); Assertions.assertEquals(SkuFamily.P, premiumCache.sku().family()); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule1")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); premiumCache .update() .withRedisConfiguration("maxclients", "3") .withoutFirewallRule("rule1") .withFirewallRule("rule3", "192.168.0.10", "192.168.0.104") .withoutMinimumTlsVersion() .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); premiumCache.refresh(); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule3")); Assertions.assertFalse(premiumCache.firewallRules().containsKey("rule1")); premiumCache.update().withoutRedisConfiguration("maxclients").apply(); premiumCache.update().withoutRedisConfiguration().apply(); Assertions.assertEquals(0, premiumCache.patchSchedules().size()); premiumCache.update().withPatchSchedule(DayOfWeek.MONDAY, 1).withPatchSchedule(DayOfWeek.TUESDAY, 5).apply(); Assertions.assertEquals(2, premiumCache.patchSchedules().size()); premiumCache.forceReboot(RebootType.ALL_NODES); List<ScheduleEntry> patchSchedule = premiumCache.listPatchSchedules(); Assertions.assertEquals(2, patchSchedule.size()); premiumCache.deletePatchSchedule(); patchSchedule = redisManager.redisCaches().getById(premiumCache.id()).asPremium().listPatchSchedules(); Assertions.assertNull(patchSchedule); List<RedisCache> redisCaches = redisManager.redisCaches().listByResourceGroup(rgName).stream().collect(Collectors.toList()); boolean found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertEquals(1, redisCaches.size()); redisCaches = 
redisManager.redisCaches().list().stream().collect(Collectors.toList()); found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertTrue(redisCaches.size() >= 3); RedisCache redisCacheGet = redisManager.redisCaches().getByResourceGroup(rgName, rrName); Assertions.assertNotNull(redisCacheGet); Assertions.assertEquals(redisCache.id(), redisCacheGet.id()); Assertions.assertEquals(redisCache.provisioningState(), redisCacheGet.provisioningState()); RedisAccessKeys redisKeys = redisCache.keys(); Assertions.assertNotNull(redisKeys); Assertions.assertNotNull(redisKeys.primaryKey()); Assertions.assertNotNull(redisKeys.secondaryKey()); RedisAccessKeys oldKeys = redisCache.refreshKeys(); RedisAccessKeys updatedPrimaryKey = redisCache.regenerateKey(RedisKeyType.PRIMARY); RedisAccessKeys updatedSecondaryKey = redisCache.regenerateKey(RedisKeyType.SECONDARY); Assertions.assertNotNull(oldKeys); Assertions.assertNotNull(updatedPrimaryKey); Assertions.assertNotNull(updatedSecondaryKey); if (!isPlaybackMode()) { Assertions.assertNotEquals(oldKeys.primaryKey(), updatedPrimaryKey.primaryKey()); Assertions.assertEquals(oldKeys.secondaryKey(), updatedPrimaryKey.secondaryKey()); Assertions.assertNotEquals(oldKeys.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertNotEquals(updatedPrimaryKey.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertEquals(updatedPrimaryKey.primaryKey(), updatedSecondaryKey.primaryKey()); } redisCache = redisCache.update().withStandardSku().apply(); Assertions.assertEquals(SkuName.STANDARD, redisCache.sku().name()); Assertions.assertEquals(SkuFamily.C, redisCache.sku().family()); try { redisCache.update().withBasicSku(1).apply(); Assertions.fail(); } catch (ManagementException e) { } redisCache.refresh(); redisManager.redisCaches().deleteById(redisCache.id()); /*premiumCache.exportData(storageAccount.name(),"snapshot1"); premiumCache.importData(Arrays.asList("snapshot1"));*/ } @Test @Test public void canCRUDLinkedServers() throws Exception { RedisCache rgg = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(rgNameSecond) .withPremiumSku(2) .withPatchSchedule(DayOfWeek.SATURDAY, 5, Duration.ofHours(5)) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40") .create(); RedisCache rggLinked = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgNameSecond) .withPremiumSku(2) .create(); Assertions.assertNotNull(rgg); Assertions.assertNotNull(rggLinked); RedisCachePremium premiumRgg = rgg.asPremium(); String llName = premiumRgg.addLinkedServer(rggLinked.id(), rggLinked.regionName(), ReplicationRole.PRIMARY); Assertions.assertEquals(ResourceUtils.nameFromResourceId(rggLinked.id()), llName); Map<String, ReplicationRole> linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(1, linkedServers.size()); Assertions.assertTrue(linkedServers.keySet().contains(llName)); Assertions.assertEquals(ReplicationRole.PRIMARY, linkedServers.get(llName)); ReplicationRole repRole = premiumRgg.getLinkedServerRole(llName); Assertions.assertEquals(ReplicationRole.PRIMARY, repRole); premiumRgg.removeLinkedServer(llName); rgg.update().withoutPatchSchedule().apply(); rggLinked.update().withFirewallRule("rulesmhule", 
"192.168.1.10", "192.168.1.20").apply(); linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(0, linkedServers.size()); } }
Please change this to `ResourceManagerUtils.sleep`.
public void canCRUDRedisCache() throws Exception { Creatable<ResourceGroup> resourceGroups = resourceManager.resourceGroups().define(rgNameSecond).withRegion(Region.US_CENTRAL); Creatable<RedisCache> redisCacheDefinition1 = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku(); Creatable<RedisCache> redisCacheDefinition2 = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku() .withShardCount(2) .withPatchSchedule(DayOfWeek.SUNDAY, 10, Duration.ofMinutes(302)); Creatable<RedisCache> redisCacheDefinition3 = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku(2) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40"); CreatedResources<RedisCache> batchRedisCaches = redisManager.redisCaches().create(redisCacheDefinition1, redisCacheDefinition2, redisCacheDefinition3); RedisCache redisCache = batchRedisCaches.get(redisCacheDefinition1.key()); RedisCache redisCachePremium = batchRedisCaches.get(redisCacheDefinition3.key()); Assertions.assertEquals(rgName, redisCache.resourceGroupName()); Assertions.assertEquals(SkuName.BASIC, redisCache.sku().name()); RedisCachePremium premiumCache = redisCachePremium.asPremium(); Assertions.assertEquals(SkuFamily.P, premiumCache.sku().family()); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule1")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); premiumCache .update() .withRedisConfiguration("maxclients", "3") .withoutFirewallRule("rule1") .withFirewallRule("rule3", "192.168.0.10", "192.168.0.104") .withoutMinimumTlsVersion() .apply(); Thread.sleep(10000); premiumCache.refresh(); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule3")); Assertions.assertFalse(premiumCache.firewallRules().containsKey("rule1")); premiumCache.update().withoutRedisConfiguration("maxclients").apply(); premiumCache.update().withoutRedisConfiguration().apply(); Assertions.assertEquals(0, premiumCache.patchSchedules().size()); premiumCache.update().withPatchSchedule(DayOfWeek.MONDAY, 1).withPatchSchedule(DayOfWeek.TUESDAY, 5).apply(); Assertions.assertEquals(2, premiumCache.patchSchedules().size()); premiumCache.forceReboot(RebootType.ALL_NODES); List<ScheduleEntry> patchSchedule = premiumCache.listPatchSchedules(); Assertions.assertEquals(2, patchSchedule.size()); premiumCache.deletePatchSchedule(); patchSchedule = redisManager.redisCaches().getById(premiumCache.id()).asPremium().listPatchSchedules(); Assertions.assertNull(patchSchedule); List<RedisCache> redisCaches = redisManager.redisCaches().listByResourceGroup(rgName).stream().collect(Collectors.toList()); boolean found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertEquals(1, redisCaches.size()); redisCaches = redisManager.redisCaches().list().stream().collect(Collectors.toList()); found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { 
found = true; } } Assertions.assertTrue(found); Assertions.assertTrue(redisCaches.size() >= 3); RedisCache redisCacheGet = redisManager.redisCaches().getByResourceGroup(rgName, rrName); Assertions.assertNotNull(redisCacheGet); Assertions.assertEquals(redisCache.id(), redisCacheGet.id()); Assertions.assertEquals(redisCache.provisioningState(), redisCacheGet.provisioningState()); RedisAccessKeys redisKeys = redisCache.keys(); Assertions.assertNotNull(redisKeys); Assertions.assertNotNull(redisKeys.primaryKey()); Assertions.assertNotNull(redisKeys.secondaryKey()); RedisAccessKeys oldKeys = redisCache.refreshKeys(); RedisAccessKeys updatedPrimaryKey = redisCache.regenerateKey(RedisKeyType.PRIMARY); RedisAccessKeys updatedSecondaryKey = redisCache.regenerateKey(RedisKeyType.SECONDARY); Assertions.assertNotNull(oldKeys); Assertions.assertNotNull(updatedPrimaryKey); Assertions.assertNotNull(updatedSecondaryKey); if (!isPlaybackMode()) { Assertions.assertNotEquals(oldKeys.primaryKey(), updatedPrimaryKey.primaryKey()); Assertions.assertEquals(oldKeys.secondaryKey(), updatedPrimaryKey.secondaryKey()); Assertions.assertNotEquals(oldKeys.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertNotEquals(updatedPrimaryKey.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertEquals(updatedPrimaryKey.primaryKey(), updatedSecondaryKey.primaryKey()); } redisCache = redisCache.update().withStandardSku().apply(); Assertions.assertEquals(SkuName.STANDARD, redisCache.sku().name()); Assertions.assertEquals(SkuFamily.C, redisCache.sku().family()); try { redisCache.update().withBasicSku(1).apply(); Assertions.fail(); } catch (ManagementException e) { } redisCache.refresh(); redisManager.redisCaches().deleteById(redisCache.id()); /*premiumCache.exportData(storageAccount.name(),"snapshot1"); premiumCache.importData(Arrays.asList("snapshot1"));*/ }
Thread.sleep(10000);
public void canCRUDRedisCache() throws Exception { Creatable<ResourceGroup> resourceGroups = resourceManager.resourceGroups().define(rgNameSecond).withRegion(Region.US_CENTRAL); Creatable<RedisCache> redisCacheDefinition1 = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku(); Creatable<RedisCache> redisCacheDefinition2 = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku() .withShardCount(2) .withPatchSchedule(DayOfWeek.SUNDAY, 10, Duration.ofMinutes(302)); Creatable<RedisCache> redisCacheDefinition3 = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku(2) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40"); CreatedResources<RedisCache> batchRedisCaches = redisManager.redisCaches().create(redisCacheDefinition1, redisCacheDefinition2, redisCacheDefinition3); RedisCache redisCache = batchRedisCaches.get(redisCacheDefinition1.key()); RedisCache redisCachePremium = batchRedisCaches.get(redisCacheDefinition3.key()); Assertions.assertEquals(rgName, redisCache.resourceGroupName()); Assertions.assertEquals(SkuName.BASIC, redisCache.sku().name()); RedisCachePremium premiumCache = redisCachePremium.asPremium(); Assertions.assertEquals(SkuFamily.P, premiumCache.sku().family()); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule1")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); premiumCache .update() .withRedisConfiguration("maxclients", "3") .withoutFirewallRule("rule1") .withFirewallRule("rule3", "192.168.0.10", "192.168.0.104") .withoutMinimumTlsVersion() .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); premiumCache.refresh(); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule3")); Assertions.assertFalse(premiumCache.firewallRules().containsKey("rule1")); premiumCache.update().withoutRedisConfiguration("maxclients").apply(); premiumCache.update().withoutRedisConfiguration().apply(); Assertions.assertEquals(0, premiumCache.patchSchedules().size()); premiumCache.update().withPatchSchedule(DayOfWeek.MONDAY, 1).withPatchSchedule(DayOfWeek.TUESDAY, 5).apply(); Assertions.assertEquals(2, premiumCache.patchSchedules().size()); premiumCache.forceReboot(RebootType.ALL_NODES); List<ScheduleEntry> patchSchedule = premiumCache.listPatchSchedules(); Assertions.assertEquals(2, patchSchedule.size()); premiumCache.deletePatchSchedule(); patchSchedule = redisManager.redisCaches().getById(premiumCache.id()).asPremium().listPatchSchedules(); Assertions.assertNull(patchSchedule); List<RedisCache> redisCaches = redisManager.redisCaches().listByResourceGroup(rgName).stream().collect(Collectors.toList()); boolean found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertEquals(1, redisCaches.size()); redisCaches = redisManager.redisCaches().list().stream().collect(Collectors.toList()); found = false; for (RedisCache existingRedisCache : redisCaches) { if 
(existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertTrue(redisCaches.size() >= 3); RedisCache redisCacheGet = redisManager.redisCaches().getByResourceGroup(rgName, rrName); Assertions.assertNotNull(redisCacheGet); Assertions.assertEquals(redisCache.id(), redisCacheGet.id()); Assertions.assertEquals(redisCache.provisioningState(), redisCacheGet.provisioningState()); RedisAccessKeys redisKeys = redisCache.keys(); Assertions.assertNotNull(redisKeys); Assertions.assertNotNull(redisKeys.primaryKey()); Assertions.assertNotNull(redisKeys.secondaryKey()); RedisAccessKeys oldKeys = redisCache.refreshKeys(); RedisAccessKeys updatedPrimaryKey = redisCache.regenerateKey(RedisKeyType.PRIMARY); RedisAccessKeys updatedSecondaryKey = redisCache.regenerateKey(RedisKeyType.SECONDARY); Assertions.assertNotNull(oldKeys); Assertions.assertNotNull(updatedPrimaryKey); Assertions.assertNotNull(updatedSecondaryKey); if (!isPlaybackMode()) { Assertions.assertNotEquals(oldKeys.primaryKey(), updatedPrimaryKey.primaryKey()); Assertions.assertEquals(oldKeys.secondaryKey(), updatedPrimaryKey.secondaryKey()); Assertions.assertNotEquals(oldKeys.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertNotEquals(updatedPrimaryKey.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertEquals(updatedPrimaryKey.primaryKey(), updatedSecondaryKey.primaryKey()); } redisCache = redisCache.update().withStandardSku().apply(); Assertions.assertEquals(SkuName.STANDARD, redisCache.sku().name()); Assertions.assertEquals(SkuFamily.C, redisCache.sku().family()); try { redisCache.update().withBasicSku(1).apply(); Assertions.fail(); } catch (ManagementException e) { } redisCache.refresh(); redisManager.redisCaches().deleteById(redisCache.id()); /*premiumCache.exportData(storageAccount.name(),"snapshot1"); premiumCache.importData(Arrays.asList("snapshot1"));*/ }
class RedisCacheOperationsTests extends RedisManagementTest { @Test @SuppressWarnings("unchecked") @Test public void canRedisVersionUpdate(){ RedisCache.MajorVersion redisVersion = RedisCache.MajorVersion.V4; RedisCache redisCache = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku() .withRedisVersion(redisVersion) .create() ; Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); redisVersion = RedisCache.MajorVersion.V6; redisCache = redisCache.update() .withRedisVersion(redisVersion) .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(300)); redisCache = redisCache.refresh(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); } @Test public void canCRUDLinkedServers() throws Exception { RedisCache rgg = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(rgNameSecond) .withPremiumSku(2) .withPatchSchedule(DayOfWeek.SATURDAY, 5, Duration.ofHours(5)) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40") .create(); RedisCache rggLinked = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgNameSecond) .withPremiumSku(2) .create(); Assertions.assertNotNull(rgg); Assertions.assertNotNull(rggLinked); RedisCachePremium premiumRgg = rgg.asPremium(); String llName = premiumRgg.addLinkedServer(rggLinked.id(), rggLinked.regionName(), ReplicationRole.PRIMARY); Assertions.assertEquals(ResourceUtils.nameFromResourceId(rggLinked.id()), llName); Map<String, ReplicationRole> linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(1, linkedServers.size()); Assertions.assertTrue(linkedServers.keySet().contains(llName)); Assertions.assertEquals(ReplicationRole.PRIMARY, linkedServers.get(llName)); ReplicationRole repRole = premiumRgg.getLinkedServerRole(llName); Assertions.assertEquals(ReplicationRole.PRIMARY, repRole); premiumRgg.removeLinkedServer(llName); rgg.update().withoutPatchSchedule().apply(); rggLinked.update().withFirewallRule("rulesmhule", "192.168.1.10", "192.168.1.20").apply(); linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(0, linkedServers.size()); } }
class RedisCacheOperationsTests extends RedisManagementTest { @Test @SuppressWarnings("unchecked") @Test public void canRedisVersionUpdate() { RedisCache.RedisVersion redisVersion = RedisCache.RedisVersion.V4; RedisCache redisCache = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku() .withRedisVersion(redisVersion) .create(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); redisVersion = RedisCache.RedisVersion.V6; redisCache = redisCache.update() .withRedisVersion(redisVersion) .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(300)); redisCache = redisCache.refresh(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); } @Test public void canCRUDLinkedServers() throws Exception { RedisCache rgg = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(rgNameSecond) .withPremiumSku(2) .withPatchSchedule(DayOfWeek.SATURDAY, 5, Duration.ofHours(5)) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40") .create(); RedisCache rggLinked = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgNameSecond) .withPremiumSku(2) .create(); Assertions.assertNotNull(rgg); Assertions.assertNotNull(rggLinked); RedisCachePremium premiumRgg = rgg.asPremium(); String llName = premiumRgg.addLinkedServer(rggLinked.id(), rggLinked.regionName(), ReplicationRole.PRIMARY); Assertions.assertEquals(ResourceUtils.nameFromResourceId(rggLinked.id()), llName); Map<String, ReplicationRole> linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(1, linkedServers.size()); Assertions.assertTrue(linkedServers.keySet().contains(llName)); Assertions.assertEquals(ReplicationRole.PRIMARY, linkedServers.get(llName)); ReplicationRole repRole = premiumRgg.getLinkedServerRole(llName); Assertions.assertEquals(ReplicationRole.PRIMARY, repRole); premiumRgg.removeLinkedServer(llName); rgg.update().withoutPatchSchedule().apply(); rggLinked.update().withFirewallRule("rulesmhule", "192.168.1.10", "192.168.1.20").apply(); linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(0, linkedServers.size()); } }
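The requested change, distilled from the before/after bodies above (a minimal sketch; it assumes ResourceManagerUtils and java.time.Duration are already available, as the other tests in this file use them):

    // Before: raw Thread.sleep with a hard-coded millisecond count; the caller must
    // declare or handle the checked InterruptedException.
    Thread.sleep(10000);

    // After: the SDK test helper takes a Duration, so the ten-second intent is explicit
    // and it does not force a checked InterruptedException on the caller.
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));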
Please check the comments for consistency.
public void canRedisVersionUpdate(){ RedisCache.MajorVersion redisVersion = RedisCache.MajorVersion.V4; RedisCache redisCache = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku() .withRedisVersion(redisVersion) .create() ; Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); redisVersion = RedisCache.MajorVersion.V6; redisCache = redisCache.update() .withRedisVersion(redisVersion) .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(300)); redisCache = redisCache.refresh(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); }
ResourceManagerUtils.sleep(Duration.ofSeconds(300));
public void canRedisVersionUpdate() { RedisCache.RedisVersion redisVersion = RedisCache.RedisVersion.V4; RedisCache redisCache = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku() .withRedisVersion(redisVersion) .create(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); redisVersion = RedisCache.RedisVersion.V6; redisCache = redisCache.update() .withRedisVersion(redisVersion) .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(300)); redisCache = redisCache.refresh(); Assertions.assertTrue(redisCache.redisVersion().startsWith(redisVersion.getValue())); }
class RedisCacheOperationsTests extends RedisManagementTest { @Test @SuppressWarnings("unchecked") public void canCRUDRedisCache() throws Exception { Creatable<ResourceGroup> resourceGroups = resourceManager.resourceGroups().define(rgNameSecond).withRegion(Region.US_CENTRAL); Creatable<RedisCache> redisCacheDefinition1 = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku(); Creatable<RedisCache> redisCacheDefinition2 = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku() .withShardCount(2) .withPatchSchedule(DayOfWeek.SUNDAY, 10, Duration.ofMinutes(302)); Creatable<RedisCache> redisCacheDefinition3 = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku(2) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40"); CreatedResources<RedisCache> batchRedisCaches = redisManager.redisCaches().create(redisCacheDefinition1, redisCacheDefinition2, redisCacheDefinition3); RedisCache redisCache = batchRedisCaches.get(redisCacheDefinition1.key()); RedisCache redisCachePremium = batchRedisCaches.get(redisCacheDefinition3.key()); Assertions.assertEquals(rgName, redisCache.resourceGroupName()); Assertions.assertEquals(SkuName.BASIC, redisCache.sku().name()); RedisCachePremium premiumCache = redisCachePremium.asPremium(); Assertions.assertEquals(SkuFamily.P, premiumCache.sku().family()); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule1")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); premiumCache .update() .withRedisConfiguration("maxclients", "3") .withoutFirewallRule("rule1") .withFirewallRule("rule3", "192.168.0.10", "192.168.0.104") .withoutMinimumTlsVersion() .apply(); Thread.sleep(10000); premiumCache.refresh(); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule3")); Assertions.assertFalse(premiumCache.firewallRules().containsKey("rule1")); premiumCache.update().withoutRedisConfiguration("maxclients").apply(); premiumCache.update().withoutRedisConfiguration().apply(); Assertions.assertEquals(0, premiumCache.patchSchedules().size()); premiumCache.update().withPatchSchedule(DayOfWeek.MONDAY, 1).withPatchSchedule(DayOfWeek.TUESDAY, 5).apply(); Assertions.assertEquals(2, premiumCache.patchSchedules().size()); premiumCache.forceReboot(RebootType.ALL_NODES); List<ScheduleEntry> patchSchedule = premiumCache.listPatchSchedules(); Assertions.assertEquals(2, patchSchedule.size()); premiumCache.deletePatchSchedule(); patchSchedule = redisManager.redisCaches().getById(premiumCache.id()).asPremium().listPatchSchedules(); Assertions.assertNull(patchSchedule); List<RedisCache> redisCaches = redisManager.redisCaches().listByResourceGroup(rgName).stream().collect(Collectors.toList()); boolean found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertEquals(1, redisCaches.size()); redisCaches = redisManager.redisCaches().list().stream().collect(Collectors.toList()); found = false; for 
(RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertTrue(redisCaches.size() >= 3); RedisCache redisCacheGet = redisManager.redisCaches().getByResourceGroup(rgName, rrName); Assertions.assertNotNull(redisCacheGet); Assertions.assertEquals(redisCache.id(), redisCacheGet.id()); Assertions.assertEquals(redisCache.provisioningState(), redisCacheGet.provisioningState()); RedisAccessKeys redisKeys = redisCache.keys(); Assertions.assertNotNull(redisKeys); Assertions.assertNotNull(redisKeys.primaryKey()); Assertions.assertNotNull(redisKeys.secondaryKey()); RedisAccessKeys oldKeys = redisCache.refreshKeys(); RedisAccessKeys updatedPrimaryKey = redisCache.regenerateKey(RedisKeyType.PRIMARY); RedisAccessKeys updatedSecondaryKey = redisCache.regenerateKey(RedisKeyType.SECONDARY); Assertions.assertNotNull(oldKeys); Assertions.assertNotNull(updatedPrimaryKey); Assertions.assertNotNull(updatedSecondaryKey); if (!isPlaybackMode()) { Assertions.assertNotEquals(oldKeys.primaryKey(), updatedPrimaryKey.primaryKey()); Assertions.assertEquals(oldKeys.secondaryKey(), updatedPrimaryKey.secondaryKey()); Assertions.assertNotEquals(oldKeys.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertNotEquals(updatedPrimaryKey.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertEquals(updatedPrimaryKey.primaryKey(), updatedSecondaryKey.primaryKey()); } redisCache = redisCache.update().withStandardSku().apply(); Assertions.assertEquals(SkuName.STANDARD, redisCache.sku().name()); Assertions.assertEquals(SkuFamily.C, redisCache.sku().family()); try { redisCache.update().withBasicSku(1).apply(); Assertions.fail(); } catch (ManagementException e) { } redisCache.refresh(); redisManager.redisCaches().deleteById(redisCache.id()); /*premiumCache.exportData(storageAccount.name(),"snapshot1"); premiumCache.importData(Arrays.asList("snapshot1"));*/ } @Test @Test public void canCRUDLinkedServers() throws Exception { RedisCache rgg = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(rgNameSecond) .withPremiumSku(2) .withPatchSchedule(DayOfWeek.SATURDAY, 5, Duration.ofHours(5)) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40") .create(); RedisCache rggLinked = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgNameSecond) .withPremiumSku(2) .create(); Assertions.assertNotNull(rgg); Assertions.assertNotNull(rggLinked); RedisCachePremium premiumRgg = rgg.asPremium(); String llName = premiumRgg.addLinkedServer(rggLinked.id(), rggLinked.regionName(), ReplicationRole.PRIMARY); Assertions.assertEquals(ResourceUtils.nameFromResourceId(rggLinked.id()), llName); Map<String, ReplicationRole> linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(1, linkedServers.size()); Assertions.assertTrue(linkedServers.keySet().contains(llName)); Assertions.assertEquals(ReplicationRole.PRIMARY, linkedServers.get(llName)); ReplicationRole repRole = premiumRgg.getLinkedServerRole(llName); Assertions.assertEquals(ReplicationRole.PRIMARY, repRole); premiumRgg.removeLinkedServer(llName); rgg.update().withoutPatchSchedule().apply(); rggLinked.update().withFirewallRule("rulesmhule", "192.168.1.10", "192.168.1.20").apply(); linkedServers = premiumRgg.listLinkedServers(); 
Assertions.assertEquals(0, linkedServers.size()); } }
class RedisCacheOperationsTests extends RedisManagementTest { @Test @SuppressWarnings("unchecked") public void canCRUDRedisCache() throws Exception { Creatable<ResourceGroup> resourceGroups = resourceManager.resourceGroups().define(rgNameSecond).withRegion(Region.US_CENTRAL); Creatable<RedisCache> redisCacheDefinition1 = redisManager .redisCaches() .define(rrName) .withRegion(Region.ASIA_EAST) .withNewResourceGroup(rgName) .withBasicSku(); Creatable<RedisCache> redisCacheDefinition2 = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku() .withShardCount(2) .withPatchSchedule(DayOfWeek.SUNDAY, 10, Duration.ofMinutes(302)); Creatable<RedisCache> redisCacheDefinition3 = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(resourceGroups) .withPremiumSku(2) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40"); CreatedResources<RedisCache> batchRedisCaches = redisManager.redisCaches().create(redisCacheDefinition1, redisCacheDefinition2, redisCacheDefinition3); RedisCache redisCache = batchRedisCaches.get(redisCacheDefinition1.key()); RedisCache redisCachePremium = batchRedisCaches.get(redisCacheDefinition3.key()); Assertions.assertEquals(rgName, redisCache.resourceGroupName()); Assertions.assertEquals(SkuName.BASIC, redisCache.sku().name()); RedisCachePremium premiumCache = redisCachePremium.asPremium(); Assertions.assertEquals(SkuFamily.P, premiumCache.sku().family()); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule1")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); premiumCache .update() .withRedisConfiguration("maxclients", "3") .withoutFirewallRule("rule1") .withFirewallRule("rule3", "192.168.0.10", "192.168.0.104") .withoutMinimumTlsVersion() .apply(); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); premiumCache.refresh(); Assertions.assertEquals(2, premiumCache.firewallRules().size()); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule2")); Assertions.assertTrue(premiumCache.firewallRules().containsKey("rule3")); Assertions.assertFalse(premiumCache.firewallRules().containsKey("rule1")); premiumCache.update().withoutRedisConfiguration("maxclients").apply(); premiumCache.update().withoutRedisConfiguration().apply(); Assertions.assertEquals(0, premiumCache.patchSchedules().size()); premiumCache.update().withPatchSchedule(DayOfWeek.MONDAY, 1).withPatchSchedule(DayOfWeek.TUESDAY, 5).apply(); Assertions.assertEquals(2, premiumCache.patchSchedules().size()); premiumCache.forceReboot(RebootType.ALL_NODES); List<ScheduleEntry> patchSchedule = premiumCache.listPatchSchedules(); Assertions.assertEquals(2, patchSchedule.size()); premiumCache.deletePatchSchedule(); patchSchedule = redisManager.redisCaches().getById(premiumCache.id()).asPremium().listPatchSchedules(); Assertions.assertNull(patchSchedule); List<RedisCache> redisCaches = redisManager.redisCaches().listByResourceGroup(rgName).stream().collect(Collectors.toList()); boolean found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertEquals(1, redisCaches.size()); redisCaches = 
redisManager.redisCaches().list().stream().collect(Collectors.toList()); found = false; for (RedisCache existingRedisCache : redisCaches) { if (existingRedisCache.name().equals(rrName)) { found = true; } } Assertions.assertTrue(found); Assertions.assertTrue(redisCaches.size() >= 3); RedisCache redisCacheGet = redisManager.redisCaches().getByResourceGroup(rgName, rrName); Assertions.assertNotNull(redisCacheGet); Assertions.assertEquals(redisCache.id(), redisCacheGet.id()); Assertions.assertEquals(redisCache.provisioningState(), redisCacheGet.provisioningState()); RedisAccessKeys redisKeys = redisCache.keys(); Assertions.assertNotNull(redisKeys); Assertions.assertNotNull(redisKeys.primaryKey()); Assertions.assertNotNull(redisKeys.secondaryKey()); RedisAccessKeys oldKeys = redisCache.refreshKeys(); RedisAccessKeys updatedPrimaryKey = redisCache.regenerateKey(RedisKeyType.PRIMARY); RedisAccessKeys updatedSecondaryKey = redisCache.regenerateKey(RedisKeyType.SECONDARY); Assertions.assertNotNull(oldKeys); Assertions.assertNotNull(updatedPrimaryKey); Assertions.assertNotNull(updatedSecondaryKey); if (!isPlaybackMode()) { Assertions.assertNotEquals(oldKeys.primaryKey(), updatedPrimaryKey.primaryKey()); Assertions.assertEquals(oldKeys.secondaryKey(), updatedPrimaryKey.secondaryKey()); Assertions.assertNotEquals(oldKeys.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertNotEquals(updatedPrimaryKey.secondaryKey(), updatedSecondaryKey.secondaryKey()); Assertions.assertEquals(updatedPrimaryKey.primaryKey(), updatedSecondaryKey.primaryKey()); } redisCache = redisCache.update().withStandardSku().apply(); Assertions.assertEquals(SkuName.STANDARD, redisCache.sku().name()); Assertions.assertEquals(SkuFamily.C, redisCache.sku().family()); try { redisCache.update().withBasicSku(1).apply(); Assertions.fail(); } catch (ManagementException e) { } redisCache.refresh(); redisManager.redisCaches().deleteById(redisCache.id()); /*premiumCache.exportData(storageAccount.name(),"snapshot1"); premiumCache.importData(Arrays.asList("snapshot1"));*/ } @Test @Test public void canCRUDLinkedServers() throws Exception { RedisCache rgg = redisManager .redisCaches() .define(rrNameThird) .withRegion(Region.US_CENTRAL) .withNewResourceGroup(rgNameSecond) .withPremiumSku(2) .withPatchSchedule(DayOfWeek.SATURDAY, 5, Duration.ofHours(5)) .withRedisConfiguration("maxclients", "2") .withNonSslPort() .withFirewallRule("rule1", "192.168.0.1", "192.168.0.4") .withFirewallRule("rule2", "192.168.0.10", "192.168.0.40") .create(); RedisCache rggLinked = redisManager .redisCaches() .define(rrNameSecond) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgNameSecond) .withPremiumSku(2) .create(); Assertions.assertNotNull(rgg); Assertions.assertNotNull(rggLinked); RedisCachePremium premiumRgg = rgg.asPremium(); String llName = premiumRgg.addLinkedServer(rggLinked.id(), rggLinked.regionName(), ReplicationRole.PRIMARY); Assertions.assertEquals(ResourceUtils.nameFromResourceId(rggLinked.id()), llName); Map<String, ReplicationRole> linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(1, linkedServers.size()); Assertions.assertTrue(linkedServers.keySet().contains(llName)); Assertions.assertEquals(ReplicationRole.PRIMARY, linkedServers.get(llName)); ReplicationRole repRole = premiumRgg.getLinkedServerRole(llName); Assertions.assertEquals(ReplicationRole.PRIMARY, repRole); premiumRgg.removeLinkedServer(llName); rgg.update().withoutPatchSchedule().apply(); rggLinked.update().withFirewallRule("rulesmhule", 
"192.168.1.10", "192.168.1.20").apply(); linkedServers = premiumRgg.listLinkedServers(); Assertions.assertEquals(0, linkedServers.size()); } }
I don't think you need the storage account?
public void canCreateVMSSWithPlan() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); final String uname = "jvuser"; Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); PurchasePlan plan = new PurchasePlan() .withName("access_server_byol") .withPublisher("openvpn") .withProduct("openvpnas"); ImageReference imageReference = new ImageReference() .withPublisher("openvpn") .withOffer("openvpnas") .withSku("access_server_byol") .withVersion("latest"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withSpecificLinuxImageVersion(imageReference) .withRootUsername(uname) .withSsh(sshPublicKey()) .withNewDataDisk(1) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withPlan(plan) .create(); VirtualMachineScaleSet currentVirtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertEquals("access_server_byol", currentVirtualMachineScaleSet.plan().name()); Assertions.assertEquals("openvpn", currentVirtualMachineScaleSet.plan().publisher()); Assertions.assertEquals("openvpnas", currentVirtualMachineScaleSet.plan().product()); }
.withNewStorageAccount(generateRandomResourceName("stg", 15))
public void canCreateVMSSWithPlan() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); final String uname = "jvuser"; Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); PurchasePlan plan = new PurchasePlan() .withName("access_server_byol") .withPublisher("openvpn") .withProduct("openvpnas"); ImageReference imageReference = new ImageReference() .withPublisher("openvpn") .withOffer("openvpnas") .withSku("access_server_byol") .withVersion("latest"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withSpecificLinuxImageVersion(imageReference) .withRootUsername(uname) .withSsh(sshPublicKey()) .withNewDataDisk(1) .withPlan(plan) .create(); VirtualMachineScaleSet currentVirtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertEquals("access_server_byol", currentVirtualMachineScaleSet.plan().name()); Assertions.assertEquals("openvpn", currentVirtualMachineScaleSet.plan().publisher()); Assertions.assertEquals("openvpnas", currentVirtualMachineScaleSet.plan().product()); }
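The after-version above drops only the .withNewStorageAccount(...) call. In the neighboring tests in this file an explicit storage account is paired with .withUnmanagedDisks(), which suggests it is only needed for unmanaged VHD storage; with a managed data disk the definition stays minimal. A sketch of the relevant fluent calls (an editorial reading of the reviewer's question, not a documented rule):

    // Before: managed data disk plus an explicit storage account for the scale set
    .withNewDataDisk(1)
    .withNewStorageAccount(generateRandomResourceName("stg", 15)) // removed in the revision
    .withPlan(plan)
    .create();

    // After: managed disks do not require a VHD storage account
    .withNewDataDisk(1)
    .withPlan(plan)
    .create();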
class VirtualMachineScaleSetOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_WEST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canUpdateVirtualMachineScaleSetWithExtensionProtectedSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("stg", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); List<StorageAccountKey> keys = storageAccount.getKeys(); Assertions.assertNotNull(keys); Assertions.assertTrue(keys.size() > 0); String storageAccountKey = keys.get(0).value(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withExistingStorageAccount(storageAccount) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .withProtectedSetting("storageAccountName", storageAccount.name()) .withProtectedSetting("storageAccountKey", storageAccountKey) .attach() .create(); Map<String, VirtualMachineScaleSetExtension> extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); VirtualMachineScaleSetExtension extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); VirtualMachineScaleSet scaleSet = this.computeManager.virtualMachineScaleSets().getById(virtualMachineScaleSet.id()); extensions = scaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); int newCapacity = scaleSet.capacity() + 1; 
virtualMachineScaleSet.update().withCapacity(newCapacity).apply(); extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); } @Test public void canCreateVirtualMachineScaleSetWithCustomScriptExtension() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .attach() .withUpgradeMode(UpgradeMode.MANUAL) .create(); checkVMInstances(virtualMachineScaleSet); List<String> publicIPAddressIds = virtualMachineScaleSet.primaryPublicIpAddressIds(); PublicIpAddress publicIPAddress = this.networkManager.publicIpAddresses().getById(publicIPAddressIds.get(0)); String fqdn = publicIPAddress.fqdn(); if (!isPlaybackMode()) { HttpClient client = new NettyAsyncHttpClientBuilder().build(); HttpRequest request = new HttpRequest(HttpMethod.GET, "http: HttpResponse response = client.send(request).block(); Assertions.assertEquals(response.getStatusCode(), 200); } for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { PagedIterable<VirtualMachineScaleSetNetworkInterface> networkInterfaces = vm.listNetworkInterfaces(); Assertions.assertEquals(TestUtilities.getSize(networkInterfaces), 1); VirtualMachineScaleSetNetworkInterface networkInterface = networkInterfaces.iterator().next(); VirtualMachineScaleSetNicIpConfiguration primaryIpConfig = null; primaryIpConfig = networkInterface.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Integer sshFrontendPort = null; List<LoadBalancerInboundNatRule> natRules = primaryIpConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule natRule : natRules) { if (natRule.backendPort() == 22) { sshFrontendPort = natRule.frontendPort(); break; } } Assertions.assertNotNull(sshFrontendPort); this.sleep(1000 * 60); this.ensureCanDoSsh(fqdn, 
sshFrontendPort, uname, password); } } @Test public void canCreateVirtualMachineScaleSetWithOptionalNetworkSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); final String nsgName = generateRandomResourceName("nsg", 10); final String asgName = generateRandomResourceName("asg", 8); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ApplicationSecurityGroup asg = this .networkManager .applicationSecurityGroups() .define(asgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withVirtualMachinePublicIp(vmssVmDnsLabel) .withExistingApplicationSecurityGroup(asg) .create(); VirtualMachineScaleSetPublicIpAddressConfiguration currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.dnsSettings()); Assertions.assertNotNull(currentIpConfig.dnsSettings().domainNameLabel()); currentIpConfig.withIdleTimeoutInMinutes(20); virtualMachineScaleSet.update().withVirtualMachinePublicIp(currentIpConfig).apply(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); virtualMachineScaleSet.refresh(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); List<String> asgIds = virtualMachineScaleSet.applicationSecurityGroupIds(); Assertions.assertNotNull(asgIds); Assertions.assertEquals(1, asgIds.size()); NetworkSecurityGroup nsg = networkManager .networkSecurityGroups() .define(nsgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineRule("rule1") .allowOutbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); virtualMachineScaleSet.deallocate(); virtualMachineScaleSet .update() .withIpForwarding() .withAcceleratedNetworking() .withExistingNetworkSecurityGroup(nsg) .apply(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet.refresh(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); 
Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet .update() .withoutIpForwarding() .withoutAcceleratedNetworking() .withoutNetworkSecurityGroup() .apply(); Assertions.assertFalse(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertFalse(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNull(virtualMachineScaleSet.networkSecurityGroupId()); } @Test @Disabled("Mock framework doesn't support data plane") public void canCreateVirtualMachineScaleSetWithSecret() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vaultName = generateRandomResourceName("vlt", 10); final String secretName = generateRandomResourceName("srt", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); Vault vault = this .keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowSecretAllPermissions() .attach() .withDeploymentEnabled() .create(); final InputStream embeddedJsonConfig = VirtualMachineExtensionOperationsTests.class.getResourceAsStream("/myTest.txt"); String secretValue = IOUtils.toString(embeddedJsonConfig, StandardCharsets.UTF_8); Secret secret = vault.secrets().define(secretName).withValue(secretValue).create(); List<VaultCertificate> certs = new ArrayList<>(); certs.add(new VaultCertificate().withCertificateUrl(secret.id())); List<VaultSecretGroup> group = new ArrayList<>(); group .add( new VaultSecretGroup() .withSourceVault(new SubResource().withId(vault.id())) .withVaultCertificates(certs)); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSecrets(group) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() > 0); } virtualMachineScaleSet.update().withoutSecrets().apply(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() == 0); } } public void canCreateVirtualMachineScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = 
this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .create(); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = virtualMachineScaleSet.listNetworkInterfaces(); int nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Assertions.assertNotNull(nic.id()); Assertions .assertTrue(nic.virtualMachineId().toLowerCase().startsWith(virtualMachineScaleSet.id().toLowerCase())); Assertions.assertNotNull(nic.macAddress()); Assertions.assertNotNull(nic.dnsServers()); Assertions.assertNotNull(nic.appliedDnsServers()); Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); Assertions.assertTrue(ipConfig.isPrimary()); Assertions.assertNotNull(ipConfig.subnetName()); Assertions.assertTrue(primaryNetwork.id().toLowerCase().equalsIgnoreCase(ipConfig.networkId())); Assertions.assertNotNull(ipConfig.privateIpAddress()); Assertions.assertNotNull(ipConfig.privateIpAddressVersion()); Assertions.assertNotNull(ipConfig.privateIpAllocationMethod()); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertEquals(lbBackends.size(), 2); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); 
Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); Assertions.assertEquals(lbNatRules.size(), 2); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099)); Assertions.assertTrue(lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 23); } } } Assertions.assertTrue(nicCount > 0); Assertions.assertEquals(virtualMachineScaleSet.vhdContainers().size(), 2); Assertions.assertEquals(virtualMachineScaleSet.sku(), VirtualMachineScaleSetSkuTypes.STANDARD_A0); Assertions.assertTrue(virtualMachineScaleSet.upgradeModel() == UpgradeMode.AUTOMATIC); Assertions.assertEquals(virtualMachineScaleSet.capacity(), 2); primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); String inboundNatPoolToRemove = null; for (String inboundNatPoolName : virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().keySet()) { inboundNatPoolToRemove = inboundNatPoolName; break; } LoadBalancer internalLoadBalancer = createInternalLoadBalancer(region, resourceGroup, primaryNetwork, "1"); virtualMachineScaleSet .update() .withExistingPrimaryInternalLoadBalancer(internalLoadBalancer) .withoutPrimaryInternetFacingLoadBalancerNatPools(inboundNatPoolToRemove) .apply(); virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 1); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 2); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 2); nics = virtualMachineScaleSet.listNetworkInterfaces(); nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertNotNull(lbBackends); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443) || (rule.frontendPort() == 1000 && 
rule.backendPort() == 1000) || (rule.frontendPort() == 1001 && rule.backendPort() == 1001)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099) || (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 8000 && lbNatRule.frontendPort() <= 8099) || (lbNatRule.frontendPort() >= 9000 && lbNatRule.frontendPort() <= 9099)); Assertions .assertTrue( lbNatRule.backendPort() == 23 || lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 44 || lbNatRule.backendPort() == 45); } } } Assertions.assertTrue(nicCount > 0); } /* * Previously name * canCreateTwoRegionalVirtualMachineScaleSetsAndAssociateEachWithDifferentBackendPoolOfZoneResilientLoadBalancer * but this was too long for some OSes and would cause git checkout to fail. */ @Test public void canCreateTwoRegionalVMScaleSetsWithDifferentPoolOfZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); List<String> natpools = new ArrayList<>(); for (String natPool : publicLoadBalancer.inboundNatPools().keySet()) { natpools.add(natPool); } Assertions.assertTrue(natpools.size() == 2); final String vmssName1 = generateRandomResourceName("vmss1", 10); VirtualMachineScaleSet virtualMachineScaleSet1 = this .computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(0)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); final String vmssName2 = generateRandomResourceName("vmss2", 10); VirtualMachineScaleSet virtualMachineScaleSet2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(1)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(virtualMachineScaleSet1.getPrimaryInternalLoadBalancer()); 
Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet1.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); Assertions.assertNull(virtualMachineScaleSet2.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet2.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); } @Test public void canCreateZoneRedundantVirtualMachineScaleSetWithZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); final String vmssName = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withAvailabilityZone(AvailabilityZoneId.ZONE_1) .withAvailabilityZone(AvailabilityZoneId.ZONE_2) .create(); Assertions.assertNotNull(virtualMachineScaleSet.availabilityZones()); Assertions.assertEquals(2, virtualMachineScaleSet.availabilityZones().size()); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithoutRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = 
this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse( found, "Resource group should not have a role assignment with virtual machine scale set MSI principal"); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithMultipleRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("jvcsrg", 10)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) 
.withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachineScaleSet.managedServiceIdentityType()); Assertions .assertTrue( virtualMachineScaleSet.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Resource group should have a role assignment with virtual machine scale set MSI principal"); PagedIterable<RoleAssignment> stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Storage account should have a role assignment with virtual machine scale set MSI principal"); } @Test public void canGetSingleVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = virtualMachineScaleSet.virtualMachines(); VirtualMachineScaleSetVM firstVm = virtualMachineScaleSetVMs.list().iterator().next(); VirtualMachineScaleSetVM fetchedVm = virtualMachineScaleSetVMs.getInstance(firstVm.instanceId()); this.checkVmsEqual(firstVm, fetchedVm); VirtualMachineScaleSetVM fetchedAsyncVm = virtualMachineScaleSetVMs.getInstanceAsync(firstVm.instanceId()).block(); this.checkVmsEqual(firstVm, fetchedAsyncVm); } @Test public void 
canCreateLowPriorityVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(-1.0) .create(); Assertions.assertEquals(vmss.virtualMachinePriority(), VirtualMachinePriorityTypes.LOW); Assertions.assertEquals(vmss.virtualMachineEvictionPolicy(), VirtualMachineEvictionPolicyTypes.DEALLOCATE); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) (-1.0)); vmss.update().withMaxPrice(2000.0).apply(); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) 2000.0); } @Test public void canPerformSimulateEvictionOnSpotVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups() .define(rgName) .withRegion(region) .create(); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .create(); PagedIterable<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(); for (VirtualMachineScaleSetVM instance: vmInstances) { Assertions.assertTrue(instance.osDiskSizeInGB() > 0); vmss.virtualMachines().simulateEviction(instance.instanceId()); } boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); deallocated = true; for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); 
if (instance.powerState() != PowerState.DEALLOCATED) { deallocated = false; } } if (deallocated) { break; } } Assertions.assertTrue(deallocated); for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); Assertions.assertEquals(0, instance.osDiskSizeInGB()); } } private void checkVmsEqual(VirtualMachineScaleSetVM original, VirtualMachineScaleSetVM fetched) { Assertions.assertEquals(original.administratorUserName(), fetched.administratorUserName()); Assertions.assertEquals(original.availabilitySetId(), fetched.availabilitySetId()); Assertions.assertEquals(original.bootDiagnosticEnabled(), fetched.bootDiagnosticEnabled()); Assertions.assertEquals(original.bootDiagnosticStorageAccountUri(), fetched.bootDiagnosticStorageAccountUri()); Assertions.assertEquals(original.computerName(), fetched.computerName()); Assertions.assertEquals(original.dataDisks().size(), fetched.dataDisks().size()); Assertions.assertEquals(original.extensions().size(), fetched.extensions().size()); Assertions.assertEquals(original.instanceId(), fetched.instanceId()); Assertions.assertEquals(original.isLatestScaleSetUpdateApplied(), fetched.isLatestScaleSetUpdateApplied()); Assertions.assertEquals(original.isLinuxPasswordAuthenticationEnabled(), fetched.isLinuxPasswordAuthenticationEnabled()); Assertions.assertEquals(original.isManagedDiskEnabled(), fetched.isManagedDiskEnabled()); Assertions.assertEquals(original.isOSBasedOnCustomImage(), fetched.isOSBasedOnCustomImage()); Assertions.assertEquals(original.isOSBasedOnPlatformImage(), fetched.isOSBasedOnPlatformImage()); Assertions.assertEquals(original.isOSBasedOnStoredImage(), fetched.isOSBasedOnStoredImage()); Assertions.assertEquals(original.isWindowsAutoUpdateEnabled(), fetched.isWindowsAutoUpdateEnabled()); Assertions.assertEquals(original.isWindowsVMAgentProvisioned(), original.isWindowsVMAgentProvisioned()); Assertions.assertEquals(original.networkInterfaceIds().size(), fetched.networkInterfaceIds().size()); Assertions.assertEquals(original.osDiskCachingType(), fetched.osDiskCachingType()); Assertions.assertEquals(original.osDiskId(), fetched.osDiskId()); Assertions.assertEquals(original.osDiskName(), fetched.osDiskName()); Assertions.assertEquals(original.osDiskSizeInGB(), fetched.osDiskSizeInGB()); Assertions.assertEquals(original.osType(), fetched.osType()); Assertions.assertEquals(original.osUnmanagedDiskVhdUri(), fetched.osUnmanagedDiskVhdUri()); Assertions.assertEquals(original.powerState(), fetched.powerState()); Assertions.assertEquals(original.primaryNetworkInterfaceId(), fetched.primaryNetworkInterfaceId()); Assertions.assertEquals(original.size(), fetched.size()); Assertions.assertEquals(original.sku().name(), fetched.sku().name()); Assertions.assertEquals(original.storedImageUnmanagedVhdUri(), fetched.storedImageUnmanagedVhdUri()); Assertions.assertEquals(original.unmanagedDataDisks().size(), fetched.unmanagedDataDisks().size()); Assertions.assertEquals(original.windowsTimeZone(), fetched.windowsTimeZone()); } private void checkVMInstances(VirtualMachineScaleSet vmScaleSet) { VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = vmScaleSet.virtualMachines(); PagedIterable<VirtualMachineScaleSetVM> virtualMachines = virtualMachineScaleSetVMs.list(); Assertions.assertEquals(TestUtilities.getSize(virtualMachines), vmScaleSet.capacity()); Assertions.assertTrue(TestUtilities.getSize(virtualMachines) > 0); virtualMachineScaleSetVMs.updateInstances(virtualMachines.iterator().next().instanceId()); for (VirtualMachineScaleSetVM vm : 
virtualMachines) { Assertions.assertNotNull(vm.size()); Assertions.assertEquals(vm.osType(), OperatingSystemTypes.LINUX); Assertions.assertNotNull(vm.computerName().startsWith(vmScaleSet.computerNamePrefix())); Assertions.assertTrue(vm.isOSBasedOnPlatformImage()); Assertions.assertNull(vm.osDiskId()); Assertions.assertNotNull(vm.osUnmanagedDiskVhdUri()); Assertions.assertNull(vm.storedImageUnmanagedVhdUri()); Assertions.assertFalse(vm.isWindowsAutoUpdateEnabled()); Assertions.assertFalse(vm.isWindowsVMAgentProvisioned()); Assertions.assertTrue(vm.administratorUserName().equalsIgnoreCase("jvuser")); VirtualMachineImage vmImage = vm.getOSPlatformImage(); Assertions.assertNotNull(vmImage); Assertions.assertEquals(vm.extensions().size(), vmScaleSet.extensions().size()); Assertions.assertNotNull(vm.powerState()); vm.refreshInstanceView(); } VirtualMachineScaleSetVM virtualMachineScaleSetVM = virtualMachines.iterator().next(); Assertions.assertNotNull(virtualMachineScaleSetVM); virtualMachineScaleSetVM.restart(); virtualMachineScaleSetVM.powerOff(); virtualMachineScaleSetVM.refreshInstanceView(); Assertions.assertEquals(virtualMachineScaleSetVM.powerState(), PowerState.STOPPED); virtualMachineScaleSetVM.start(); for (VirtualMachineScaleSetVM vm : virtualMachines) { PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = vmScaleSet.listNetworkInterfacesByInstanceId(vm.instanceId()); Assertions.assertNotNull(nics); Assertions.assertEquals(TestUtilities.getSize(nics), 1); VirtualMachineScaleSetNetworkInterface nic = nics.iterator().next(); Assertions.assertNotNull(nic.virtualMachineId()); Assertions.assertTrue(nic.virtualMachineId().toLowerCase().equalsIgnoreCase(vm.id())); Assertions.assertNotNull(vm.listNetworkInterfaces()); VirtualMachineScaleSetNetworkInterface nicA = vmScaleSet.getNetworkInterfaceByInstanceId(vm.instanceId(), nic.name()); Assertions.assertNotNull(nicA); VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name()); Assertions.assertNotNull(nicB); } } @Test public void testVirtualMachineScaleSetSkuTypes() { rgName = null; VirtualMachineScaleSetSkuTypes skuType = VirtualMachineScaleSetSkuTypes.STANDARD_A0; Assertions.assertNull(skuType.sku().capacity()); Sku sku1 = skuType.sku(); Assertions.assertNull(sku1.capacity()); sku1.withCapacity(1L); Assertions.assertEquals(sku1.capacity().longValue(), 1); Assertions.assertNull(skuType.sku().capacity()); Sku sku2 = skuType.sku(); Assertions.assertNull(sku2.capacity()); sku2.withCapacity(2L); Assertions.assertEquals(sku2.capacity().longValue(), 2); Assertions.assertNull(skuType.sku().capacity()); Assertions.assertEquals(sku1.capacity().longValue(), 1); } @Test public void canDeleteVMSSInstance() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(4) .create(); Assertions.assertEquals(4, vmss.virtualMachines().list().stream().count()); List<String> firstTwoIds = vmss.virtualMachines().list().stream() .limit(2) .map(VirtualMachineScaleSetVM::instanceId) .collect(Collectors.toList()); vmss.virtualMachines().deleteInstances(firstTwoIds, true); Assertions.assertEquals(2, vmss.virtualMachines().list().stream().count()); vmss.virtualMachines().deleteInstances(Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); Assertions.assertEquals(1, vmss.virtualMachines().list().stream().count()); computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); } }
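The canDeleteVMSSInstance test that closes the class above exercises both entry points for deleting scale set instances: deleteInstances(ids, forceDeletion) on the scale set's virtualMachines() collection, and deleteInstances(rgName, vmssName, ids, forceDeletion) on the compute manager. As a standalone reference, here is a minimal sketch of that pattern, assuming an already-authenticated ComputeManager and an existing scale set; the class and method names in the sketch are illustrative only and are not part of the record itself.

// Minimal sketch (illustrative, not from the record): delete scale set instances two ways,
// assuming "computeManager" is authenticated and rgName/vmssName identify an existing VMSS.
import com.azure.resourcemanager.compute.ComputeManager;
import com.azure.resourcemanager.compute.models.VirtualMachineScaleSet;
import com.azure.resourcemanager.compute.models.VirtualMachineScaleSetVM;

import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

final class VmssInstanceDeletionSketch {
    static void deleteSomeInstances(ComputeManager computeManager, String rgName, String vmssName) {
        VirtualMachineScaleSet vmss =
            computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName);

        // Collect the instance ids of the first two VMs in the scale set.
        List<String> firstTwoIds = vmss.virtualMachines().list().stream()
            .limit(2)
            .map(VirtualMachineScaleSetVM::instanceId)
            .collect(Collectors.toList());

        // Delete through the scale set object; the second argument requests force deletion.
        vmss.virtualMachines().deleteInstances(firstTwoIds, true);

        // Alternatively, delete one instance through the manager entry point without force deletion.
        String nextId = vmss.virtualMachines().list().stream().findFirst().get().instanceId();
        computeManager.virtualMachineScaleSets()
            .deleteInstances(rgName, vmssName, Collections.singleton(nextId), false);
    }
}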
class VirtualMachineScaleSetOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_WEST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canUpdateVirtualMachineScaleSetWithExtensionProtectedSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("stg", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); List<StorageAccountKey> keys = storageAccount.getKeys(); Assertions.assertNotNull(keys); Assertions.assertTrue(keys.size() > 0); String storageAccountKey = keys.get(0).value(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withExistingStorageAccount(storageAccount) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .withProtectedSetting("storageAccountName", storageAccount.name()) .withProtectedSetting("storageAccountKey", storageAccountKey) .attach() .create(); Map<String, VirtualMachineScaleSetExtension> extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); VirtualMachineScaleSetExtension extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); VirtualMachineScaleSet scaleSet = this.computeManager.virtualMachineScaleSets().getById(virtualMachineScaleSet.id()); extensions = scaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); int newCapacity = scaleSet.capacity() + 1;
virtualMachineScaleSet.update().withCapacity(newCapacity).apply(); extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); } @Test public void canCreateVirtualMachineScaleSetWithCustomScriptExtension() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .attach() .withUpgradeMode(UpgradeMode.MANUAL) .create(); checkVMInstances(virtualMachineScaleSet); List<String> publicIPAddressIds = virtualMachineScaleSet.primaryPublicIpAddressIds(); PublicIpAddress publicIPAddress = this.networkManager.publicIpAddresses().getById(publicIPAddressIds.get(0)); String fqdn = publicIPAddress.fqdn(); if (!isPlaybackMode()) { HttpClient client = new NettyAsyncHttpClientBuilder().build(); HttpRequest request = new HttpRequest(HttpMethod.GET, "http: HttpResponse response = client.send(request).block(); Assertions.assertEquals(response.getStatusCode(), 200); } for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { PagedIterable<VirtualMachineScaleSetNetworkInterface> networkInterfaces = vm.listNetworkInterfaces(); Assertions.assertEquals(TestUtilities.getSize(networkInterfaces), 1); VirtualMachineScaleSetNetworkInterface networkInterface = networkInterfaces.iterator().next(); VirtualMachineScaleSetNicIpConfiguration primaryIpConfig = null; primaryIpConfig = networkInterface.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Integer sshFrontendPort = null; List<LoadBalancerInboundNatRule> natRules = primaryIpConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule natRule : natRules) { if (natRule.backendPort() == 22) { sshFrontendPort = natRule.frontendPort(); break; } } Assertions.assertNotNull(sshFrontendPort); this.sleep(1000 * 60); this.ensureCanDoSsh(fqdn, 
sshFrontendPort, uname, password); } } @Test public void canCreateVirtualMachineScaleSetWithOptionalNetworkSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); final String nsgName = generateRandomResourceName("nsg", 10); final String asgName = generateRandomResourceName("asg", 8); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ApplicationSecurityGroup asg = this .networkManager .applicationSecurityGroups() .define(asgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withVirtualMachinePublicIp(vmssVmDnsLabel) .withExistingApplicationSecurityGroup(asg) .create(); VirtualMachineScaleSetPublicIpAddressConfiguration currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.dnsSettings()); Assertions.assertNotNull(currentIpConfig.dnsSettings().domainNameLabel()); currentIpConfig.withIdleTimeoutInMinutes(20); virtualMachineScaleSet.update().withVirtualMachinePublicIp(currentIpConfig).apply(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); virtualMachineScaleSet.refresh(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); List<String> asgIds = virtualMachineScaleSet.applicationSecurityGroupIds(); Assertions.assertNotNull(asgIds); Assertions.assertEquals(1, asgIds.size()); NetworkSecurityGroup nsg = networkManager .networkSecurityGroups() .define(nsgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineRule("rule1") .allowOutbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); virtualMachineScaleSet.deallocate(); virtualMachineScaleSet .update() .withIpForwarding() .withAcceleratedNetworking() .withExistingNetworkSecurityGroup(nsg) .apply(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet.refresh(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); 
Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet .update() .withoutIpForwarding() .withoutAcceleratedNetworking() .withoutNetworkSecurityGroup() .apply(); Assertions.assertFalse(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertFalse(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNull(virtualMachineScaleSet.networkSecurityGroupId()); } @Test @Disabled("Mock framework doesn't support data plane") public void canCreateVirtualMachineScaleSetWithSecret() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vaultName = generateRandomResourceName("vlt", 10); final String secretName = generateRandomResourceName("srt", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); Vault vault = this .keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowSecretAllPermissions() .attach() .withDeploymentEnabled() .create(); final InputStream embeddedJsonConfig = VirtualMachineExtensionOperationsTests.class.getResourceAsStream("/myTest.txt"); String secretValue = IOUtils.toString(embeddedJsonConfig, StandardCharsets.UTF_8); Secret secret = vault.secrets().define(secretName).withValue(secretValue).create(); List<VaultCertificate> certs = new ArrayList<>(); certs.add(new VaultCertificate().withCertificateUrl(secret.id())); List<VaultSecretGroup> group = new ArrayList<>(); group .add( new VaultSecretGroup() .withSourceVault(new SubResource().withId(vault.id())) .withVaultCertificates(certs)); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSecrets(group) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() > 0); } virtualMachineScaleSet.update().withoutSecrets().apply(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() == 0); } } public void canCreateVirtualMachineScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = 
this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .create(); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = virtualMachineScaleSet.listNetworkInterfaces(); int nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Assertions.assertNotNull(nic.id()); Assertions .assertTrue(nic.virtualMachineId().toLowerCase().startsWith(virtualMachineScaleSet.id().toLowerCase())); Assertions.assertNotNull(nic.macAddress()); Assertions.assertNotNull(nic.dnsServers()); Assertions.assertNotNull(nic.appliedDnsServers()); Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); Assertions.assertTrue(ipConfig.isPrimary()); Assertions.assertNotNull(ipConfig.subnetName()); Assertions.assertTrue(primaryNetwork.id().toLowerCase().equalsIgnoreCase(ipConfig.networkId())); Assertions.assertNotNull(ipConfig.privateIpAddress()); Assertions.assertNotNull(ipConfig.privateIpAddressVersion()); Assertions.assertNotNull(ipConfig.privateIpAllocationMethod()); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertEquals(lbBackends.size(), 2); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); 
Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); Assertions.assertEquals(lbNatRules.size(), 2); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099)); Assertions.assertTrue(lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 23); } } } Assertions.assertTrue(nicCount > 0); Assertions.assertEquals(virtualMachineScaleSet.vhdContainers().size(), 2); Assertions.assertEquals(virtualMachineScaleSet.sku(), VirtualMachineScaleSetSkuTypes.STANDARD_A0); Assertions.assertTrue(virtualMachineScaleSet.upgradeModel() == UpgradeMode.AUTOMATIC); Assertions.assertEquals(virtualMachineScaleSet.capacity(), 2); primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); String inboundNatPoolToRemove = null; for (String inboundNatPoolName : virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().keySet()) { inboundNatPoolToRemove = inboundNatPoolName; break; } LoadBalancer internalLoadBalancer = createInternalLoadBalancer(region, resourceGroup, primaryNetwork, "1"); virtualMachineScaleSet .update() .withExistingPrimaryInternalLoadBalancer(internalLoadBalancer) .withoutPrimaryInternetFacingLoadBalancerNatPools(inboundNatPoolToRemove) .apply(); virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 1); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 2); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 2); nics = virtualMachineScaleSet.listNetworkInterfaces(); nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertNotNull(lbBackends); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443) || (rule.frontendPort() == 1000 && 
rule.backendPort() == 1000) || (rule.frontendPort() == 1001 && rule.backendPort() == 1001)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099) || (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 8000 && lbNatRule.frontendPort() <= 8099) || (lbNatRule.frontendPort() >= 9000 && lbNatRule.frontendPort() <= 9099)); Assertions .assertTrue( lbNatRule.backendPort() == 23 || lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 44 || lbNatRule.backendPort() == 45); } } } Assertions.assertTrue(nicCount > 0); } /* * Previously name * canCreateTwoRegionalVirtualMachineScaleSetsAndAssociateEachWithDifferentBackendPoolOfZoneResilientLoadBalancer * but this was too long for some OSes and would cause git checkout to fail. */ @Test public void canCreateTwoRegionalVMScaleSetsWithDifferentPoolOfZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); List<String> natpools = new ArrayList<>(); for (String natPool : publicLoadBalancer.inboundNatPools().keySet()) { natpools.add(natPool); } Assertions.assertTrue(natpools.size() == 2); final String vmssName1 = generateRandomResourceName("vmss1", 10); VirtualMachineScaleSet virtualMachineScaleSet1 = this .computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(0)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); final String vmssName2 = generateRandomResourceName("vmss2", 10); VirtualMachineScaleSet virtualMachineScaleSet2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(1)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(virtualMachineScaleSet1.getPrimaryInternalLoadBalancer()); 
Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet1.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); Assertions.assertNull(virtualMachineScaleSet2.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet2.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); } @Test public void canCreateZoneRedundantVirtualMachineScaleSetWithZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); final String vmssName = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withAvailabilityZone(AvailabilityZoneId.ZONE_1) .withAvailabilityZone(AvailabilityZoneId.ZONE_2) .create(); Assertions.assertNotNull(virtualMachineScaleSet.availabilityZones()); Assertions.assertEquals(2, virtualMachineScaleSet.availabilityZones().size()); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithoutRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = 
this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse( found, "Resource group should not have a role assignment with virtual machine scale set MSI principal"); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithMultipleRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("jvcsrg", 10)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) 
.withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachineScaleSet.managedServiceIdentityType()); Assertions .assertTrue( virtualMachineScaleSet.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Resource group should have a role assignment with virtual machine scale set MSI principal"); PagedIterable<RoleAssignment> stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Storage account should have a role assignment with virtual machine scale set MSI principal"); } @Test public void canGetSingleVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = virtualMachineScaleSet.virtualMachines(); VirtualMachineScaleSetVM firstVm = virtualMachineScaleSetVMs.list().iterator().next(); VirtualMachineScaleSetVM fetchedVm = virtualMachineScaleSetVMs.getInstance(firstVm.instanceId()); this.checkVmsEqual(firstVm, fetchedVm); VirtualMachineScaleSetVM fetchedAsyncVm = virtualMachineScaleSetVMs.getInstanceAsync(firstVm.instanceId()).block(); this.checkVmsEqual(firstVm, fetchedAsyncVm); } @Test public void 
canCreateLowPriorityVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(-1.0) .create(); Assertions.assertEquals(vmss.virtualMachinePriority(), VirtualMachinePriorityTypes.LOW); Assertions.assertEquals(vmss.virtualMachineEvictionPolicy(), VirtualMachineEvictionPolicyTypes.DEALLOCATE); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) (-1.0)); vmss.update().withMaxPrice(2000.0).apply(); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) 2000.0); } @Test public void canPerformSimulateEvictionOnSpotVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups() .define(rgName) .withRegion(region) .create(); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .create(); PagedIterable<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(); for (VirtualMachineScaleSetVM instance: vmInstances) { Assertions.assertTrue(instance.osDiskSizeInGB() > 0); vmss.virtualMachines().simulateEviction(instance.instanceId()); } boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); deallocated = true; for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); 
if (instance.powerState() != PowerState.DEALLOCATED) { deallocated = false; } } if (deallocated) { break; } } Assertions.assertTrue(deallocated); for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); Assertions.assertEquals(0, instance.osDiskSizeInGB()); } } private void checkVmsEqual(VirtualMachineScaleSetVM original, VirtualMachineScaleSetVM fetched) { Assertions.assertEquals(original.administratorUserName(), fetched.administratorUserName()); Assertions.assertEquals(original.availabilitySetId(), fetched.availabilitySetId()); Assertions.assertEquals(original.bootDiagnosticEnabled(), fetched.bootDiagnosticEnabled()); Assertions.assertEquals(original.bootDiagnosticStorageAccountUri(), fetched.bootDiagnosticStorageAccountUri()); Assertions.assertEquals(original.computerName(), fetched.computerName()); Assertions.assertEquals(original.dataDisks().size(), fetched.dataDisks().size()); Assertions.assertEquals(original.extensions().size(), fetched.extensions().size()); Assertions.assertEquals(original.instanceId(), fetched.instanceId()); Assertions.assertEquals(original.isLatestScaleSetUpdateApplied(), fetched.isLatestScaleSetUpdateApplied()); Assertions.assertEquals(original.isLinuxPasswordAuthenticationEnabled(), fetched.isLinuxPasswordAuthenticationEnabled()); Assertions.assertEquals(original.isManagedDiskEnabled(), fetched.isManagedDiskEnabled()); Assertions.assertEquals(original.isOSBasedOnCustomImage(), fetched.isOSBasedOnCustomImage()); Assertions.assertEquals(original.isOSBasedOnPlatformImage(), fetched.isOSBasedOnPlatformImage()); Assertions.assertEquals(original.isOSBasedOnStoredImage(), fetched.isOSBasedOnStoredImage()); Assertions.assertEquals(original.isWindowsAutoUpdateEnabled(), fetched.isWindowsAutoUpdateEnabled()); Assertions.assertEquals(original.isWindowsVMAgentProvisioned(), original.isWindowsVMAgentProvisioned()); Assertions.assertEquals(original.networkInterfaceIds().size(), fetched.networkInterfaceIds().size()); Assertions.assertEquals(original.osDiskCachingType(), fetched.osDiskCachingType()); Assertions.assertEquals(original.osDiskId(), fetched.osDiskId()); Assertions.assertEquals(original.osDiskName(), fetched.osDiskName()); Assertions.assertEquals(original.osDiskSizeInGB(), fetched.osDiskSizeInGB()); Assertions.assertEquals(original.osType(), fetched.osType()); Assertions.assertEquals(original.osUnmanagedDiskVhdUri(), fetched.osUnmanagedDiskVhdUri()); Assertions.assertEquals(original.powerState(), fetched.powerState()); Assertions.assertEquals(original.primaryNetworkInterfaceId(), fetched.primaryNetworkInterfaceId()); Assertions.assertEquals(original.size(), fetched.size()); Assertions.assertEquals(original.sku().name(), fetched.sku().name()); Assertions.assertEquals(original.storedImageUnmanagedVhdUri(), fetched.storedImageUnmanagedVhdUri()); Assertions.assertEquals(original.unmanagedDataDisks().size(), fetched.unmanagedDataDisks().size()); Assertions.assertEquals(original.windowsTimeZone(), fetched.windowsTimeZone()); } private void checkVMInstances(VirtualMachineScaleSet vmScaleSet) { VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = vmScaleSet.virtualMachines(); PagedIterable<VirtualMachineScaleSetVM> virtualMachines = virtualMachineScaleSetVMs.list(); Assertions.assertEquals(TestUtilities.getSize(virtualMachines), vmScaleSet.capacity()); Assertions.assertTrue(TestUtilities.getSize(virtualMachines) > 0); virtualMachineScaleSetVMs.updateInstances(virtualMachines.iterator().next().instanceId()); for (VirtualMachineScaleSetVM vm : 
virtualMachines) { Assertions.assertNotNull(vm.size()); Assertions.assertEquals(vm.osType(), OperatingSystemTypes.LINUX); Assertions.assertNotNull(vm.computerName().startsWith(vmScaleSet.computerNamePrefix())); Assertions.assertTrue(vm.isOSBasedOnPlatformImage()); Assertions.assertNull(vm.osDiskId()); Assertions.assertNotNull(vm.osUnmanagedDiskVhdUri()); Assertions.assertNull(vm.storedImageUnmanagedVhdUri()); Assertions.assertFalse(vm.isWindowsAutoUpdateEnabled()); Assertions.assertFalse(vm.isWindowsVMAgentProvisioned()); Assertions.assertTrue(vm.administratorUserName().equalsIgnoreCase("jvuser")); VirtualMachineImage vmImage = vm.getOSPlatformImage(); Assertions.assertNotNull(vmImage); Assertions.assertEquals(vm.extensions().size(), vmScaleSet.extensions().size()); Assertions.assertNotNull(vm.powerState()); vm.refreshInstanceView(); } VirtualMachineScaleSetVM virtualMachineScaleSetVM = virtualMachines.iterator().next(); Assertions.assertNotNull(virtualMachineScaleSetVM); virtualMachineScaleSetVM.restart(); virtualMachineScaleSetVM.powerOff(); virtualMachineScaleSetVM.refreshInstanceView(); Assertions.assertEquals(virtualMachineScaleSetVM.powerState(), PowerState.STOPPED); virtualMachineScaleSetVM.start(); for (VirtualMachineScaleSetVM vm : virtualMachines) { PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = vmScaleSet.listNetworkInterfacesByInstanceId(vm.instanceId()); Assertions.assertNotNull(nics); Assertions.assertEquals(TestUtilities.getSize(nics), 1); VirtualMachineScaleSetNetworkInterface nic = nics.iterator().next(); Assertions.assertNotNull(nic.virtualMachineId()); Assertions.assertTrue(nic.virtualMachineId().toLowerCase().equalsIgnoreCase(vm.id())); Assertions.assertNotNull(vm.listNetworkInterfaces()); VirtualMachineScaleSetNetworkInterface nicA = vmScaleSet.getNetworkInterfaceByInstanceId(vm.instanceId(), nic.name()); Assertions.assertNotNull(nicA); VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name()); Assertions.assertNotNull(nicB); } } @Test public void testVirtualMachineScaleSetSkuTypes() { rgName = null; VirtualMachineScaleSetSkuTypes skuType = VirtualMachineScaleSetSkuTypes.STANDARD_A0; Assertions.assertNull(skuType.sku().capacity()); Sku sku1 = skuType.sku(); Assertions.assertNull(sku1.capacity()); sku1.withCapacity(1L); Assertions.assertEquals(sku1.capacity().longValue(), 1); Assertions.assertNull(skuType.sku().capacity()); Sku sku2 = skuType.sku(); Assertions.assertNull(sku2.capacity()); sku2.withCapacity(2L); Assertions.assertEquals(sku2.capacity().longValue(), 2); Assertions.assertNull(skuType.sku().capacity()); Assertions.assertEquals(sku1.capacity().longValue(), 1); } @Test public void canDeleteVMSSInstance() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(4) .create(); Assertions.assertEquals(4, vmss.virtualMachines().list().stream().count()); List<String> firstTwoIds = vmss.virtualMachines().list().stream() .limit(2) .map(VirtualMachineScaleSetVM::instanceId) .collect(Collectors.toList()); vmss.virtualMachines().deleteInstances(firstTwoIds, true); Assertions.assertEquals(2, vmss.virtualMachines().list().stream().count()); vmss.virtualMachines().deleteInstances(Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); Assertions.assertEquals(1, vmss.virtualMachines().list().stream().count()); computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); } }
Nit: you can call another constructor using `this(tokenRefresher, refreshProactively, initialToken, getDefaultRefreshTimeBeforeTokenExpiry());`
public CommunicationTokenRefreshOptions(Supplier<Mono<String>> tokenRefresher, boolean refreshProactively, String initialToken) { this.tokenRefresher = tokenRefresher; this.refreshProactively = refreshProactively; this.initialToken = initialToken; this.refreshTimeBeforeTokenExpiry = getDefaultRefreshTimeBeforeTokenExpiry(); }
this.refreshTimeBeforeTokenExpiry = getDefaultRefreshTimeBeforeTokenExpiry();
public CommunicationTokenRefreshOptions(Supplier<Mono<String>> tokenRefresher, boolean refreshProactively, String initialToken) { this.asyncTokenRefresher = tokenRefresher; this.tokenRefresher = null; this.refreshProactively = refreshProactively; this.initialToken = initialToken; }
class CommunicationTokenRefreshOptions { private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10; private final Supplier<Mono<String>> tokenRefresher; private final boolean refreshProactively; private final String initialToken; private final Duration refreshTimeBeforeTokenExpiry; /** * Creates a CommunicationTokenRefreshOptions object * * @param tokenRefresher the token refresher to provide capacity to fetch fresh token * @param refreshProactively when set to true, turn on proactive fetching to call * tokenRefresher before token expiry by minutes set * with setCallbackOffsetMinutes or default value of * two minutes */ public CommunicationTokenRefreshOptions(Supplier<Mono<String>> tokenRefresher, boolean refreshProactively) { this.tokenRefresher = tokenRefresher; this.refreshProactively = refreshProactively; this.initialToken = null; this.refreshTimeBeforeTokenExpiry = getDefaultRefreshTimeBeforeTokenExpiry(); } /** * Creates a CommunicationTokenRefreshOptions object * * @param tokenRefresher the token refresher to provide capacity to fetch fresh token * @param refreshProactively when set to true, turn on proactive fetching to call * tokenRefresher before token expiry by minutes set * with setCallbackOffsetMinutes or default value of * two minutes * @param initialToken the optional serialized JWT token */ /** * Creates a CommunicationTokenRefreshOptions object * * @param tokenRefresher the token refresher to provide capacity to fetch fresh token * @param refreshProactively when set to true, turn on proactive fetching to call * tokenRefresher before token expiry by minutes set * with setCallbackOffsetMinutes or default value of * two minutes * @param initialToken the optional serialized JWT token * @param refreshTimeBeforeTokenExpiry The time span before token expiry that tokenRefresher will be called if refreshProactively is true. For example, setting it to 5min means that 5min before the cached token expires, proactive refresh will request a new token. The default value is 10min. */ public CommunicationTokenRefreshOptions(Supplier<Mono<String>> tokenRefresher, boolean refreshProactively, String initialToken, Duration refreshTimeBeforeTokenExpiry) { this.tokenRefresher = tokenRefresher; this.refreshProactively = refreshProactively; this.initialToken = initialToken; this.refreshTimeBeforeTokenExpiry = refreshTimeBeforeTokenExpiry; } /** * @return the token refresher to provide capacity to fetch fresh token */ public Supplier<Mono<String>> getTokenRefresher() { return tokenRefresher; } /** * @return whether or not to refresh token proactively */ public boolean isRefreshProactively() { return refreshProactively; } /** * @return the initial token */ public String getInitialToken() { return initialToken; } /** * @return the time span before token expiry that tokenRefresher will be called if refreshProactively is true */ public Duration getRefreshTimeBeforeTokenExpiry() { return refreshTimeBeforeTokenExpiry; } /** * @return default time span before token expiry that tokenRefresher will be called if refreshProactively is true */ public static Duration getDefaultRefreshTimeBeforeTokenExpiry() { return Duration.ofMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES); } }
class CommunicationTokenRefreshOptions { private final Supplier<String> tokenRefresher; private final Supplier<Mono<String>> asyncTokenRefresher; private boolean refreshProactively; private String initialToken; /** * Creates a CommunicationTokenRefreshOptions object * * @param tokenRefresher The asynchronous callback function that acquires a fresh token * from the Communication Identity API, e.g. by * calling the CommunicationIdentityClient * @param refreshProactively Determines whether the token should be proactively * renewed prior to its expiry or on demand. * @deprecated Use {@link * and chain fluent setter {@link */ @Deprecated public CommunicationTokenRefreshOptions(Supplier<Mono<String>> tokenRefresher, boolean refreshProactively) { this(tokenRefresher, refreshProactively, null); } /** * Creates a CommunicationTokenRefreshOptions object * * @param tokenRefresher The asynchronous callback function that acquires a fresh token * from the Communication Identity API, e.g. by * calling the CommunicationIdentityClient * @param refreshProactively Determines whether the token should be proactively * renewed prior to its expiry or on demand. * @param initialToken The optional serialized JWT token * @deprecated Use {@link * and chain fluent setters {@link * {@link */ @Deprecated /** * Creates a CommunicationTokenRefreshOptions object * * @param tokenRefresher The synchronous callback function that acquires a fresh token from * the Communication Identity API, e.g. by calling the * CommunicationIdentityClient * The returned token must be valid (its expiration date * must be set in the future). */ public CommunicationTokenRefreshOptions(Supplier<String> tokenRefresher) { this.tokenRefresher = tokenRefresher; this.asyncTokenRefresher = null; this.refreshProactively = false; this.initialToken = null; } /** * @return The asynchronous token refresher to provide capacity to fetch fresh token * @deprecated Use synchronous token refresher instead. */ @Deprecated public Supplier<Mono<String>> getTokenRefresher() { return asyncTokenRefresher; } /** * @return The synchronous token refresher to provide capacity to fetch fresh token */ public Supplier<String> getTokenRefresherSync() { return tokenRefresher; } /** * @return Whether or not to refresh token proactively */ public boolean isRefreshProactively() { return refreshProactively; } /** * Set whether the token should be proactively renewed prior to its expiry or on * demand. * * @param refreshProactively the refreshProactively value to set. * @return the CommunicationTokenRefreshOptions object itself. */ public CommunicationTokenRefreshOptions setRefreshProactively(boolean refreshProactively) { this.refreshProactively = refreshProactively; return this; } /** * @return The initial token */ public String getInitialToken() { return initialToken; } /** * Set the optional serialized JWT token * * @param initialToken the initialToken value to set. * @return the CommunicationTokenRefreshOptions object itself. */ public CommunicationTokenRefreshOptions setInitialToken(String initialToken) { this.initialToken = initialToken; return this; } }
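The constructor-chaining nit above is plain Java `this(...)` delegation. A minimal, hedged sketch of the pattern (the class and helper names here are illustrative stand-ins, not the SDK types):
```
import java.time.Duration;

// Minimal sketch of constructor chaining: the shorter constructor delegates to the fuller
// one and supplies the default, so the field assignments live in a single place.
final class RefreshOptionsSketch {
    private static final int DEFAULT_OFFSET_MINUTES = 10;

    private final String initialToken;
    private final Duration refreshTimeBeforeTokenExpiry;

    RefreshOptionsSketch(String initialToken) {
        // Delegate instead of repeating the assignments and the default value.
        this(initialToken, defaultRefreshTimeBeforeTokenExpiry());
    }

    RefreshOptionsSketch(String initialToken, Duration refreshTimeBeforeTokenExpiry) {
        this.initialToken = initialToken;
        this.refreshTimeBeforeTokenExpiry = refreshTimeBeforeTokenExpiry;
    }

    private static Duration defaultRefreshTimeBeforeTokenExpiry() {
        return Duration.ofMinutes(DEFAULT_OFFSET_MINUTES);
    }
}
```
Delegating keeps the default value and the field assignments in one constructor, so a later change to the default touches a single place.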
Since `refreshTimeBeforeTokenExpiry` is used in the `setToken` method, I'd do the actions in the constructor in a different order - similar to the other constructors, meaning: 1) set `refreshTimeBeforeTokenExpiry` first, 2) call `setToken` second.
public CommunicationTokenCredential(String token) { Objects.requireNonNull(token, "'token' cannot be null."); setToken(token); CommunicationTokenRefreshOptions tokenRefreshOptions = new CommunicationTokenRefreshOptions(null, false, token); refreshTimeBeforeTokenExpiry = tokenRefreshOptions.getRefreshTimeBeforeTokenExpiry(); }
refreshTimeBeforeTokenExpiry = tokenRefreshOptions.getRefreshTimeBeforeTokenExpiry();
public CommunicationTokenCredential(String token) { Objects.requireNonNull(token, "'token' cannot be null."); setToken(token); }
class CommunicationTokenCredential implements AutoCloseable { private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class); private final Duration refreshTimeBeforeTokenExpiry; private AccessToken accessToken; private final TokenParser tokenParser = new TokenParser(); private Supplier<Mono<String>> refresher; private FetchingTask fetchingTask; private boolean isClosed = false; /** * Create with serialized JWT token * * @param token serialized JWT token */ /** * Create with tokenRefreshOptions, which includes a token supplier and optional serialized JWT token. * If refresh proactively is true, callback function tokenRefresher will be called * ahead of the token expiry by the number of minutes specified by * CallbackOffsetMinutes defaulted to ten minutes. To modify this default, call * setCallbackOffsetMinutes after construction * * @param tokenRefreshOptions implementation to supply fresh token when reqested */ public CommunicationTokenCredential(CommunicationTokenRefreshOptions tokenRefreshOptions) { Supplier<Mono<String>> tokenRefresher = tokenRefreshOptions.getTokenRefresher(); Objects.requireNonNull(tokenRefresher, "'tokenRefresher' cannot be null."); this.refreshTimeBeforeTokenExpiry = tokenRefreshOptions.getRefreshTimeBeforeTokenExpiry(); refresher = tokenRefresher; if (tokenRefreshOptions.getInitialToken() != null) { setToken(tokenRefreshOptions.getInitialToken()); if (tokenRefreshOptions.isRefreshProactively()) { OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(refreshTimeBeforeTokenExpiry.toMinutes()); fetchingTask = new FetchingTask(this, nextFetchTime); } } } /** * Get Azure core access token from credential * * @return Asynchronous call to fetch actual token */ public Mono<AccessToken> getToken() { if (isClosed) { return FluxUtil.monoError(logger, new RuntimeException("getToken called on closed CommunicationTokenCredential object")); } if ((accessToken == null || accessToken.isExpired()) && refresher != null) { synchronized (this) { if ((accessToken == null || accessToken.isExpired()) && refresher != null) { return fetchFreshToken() .map(token -> { accessToken = tokenParser.parseJWTToken(token); return accessToken; }); } } } return Mono.just(accessToken); } @Override public void close() throws IOException { isClosed = true; if (fetchingTask != null) { fetchingTask.stopTimer(); fetchingTask = null; } refresher = null; } boolean hasProactiveFetcher() { return fetchingTask != null; } private void setToken(String freshToken) { accessToken = tokenParser.parseJWTToken(freshToken); if (fetchingTask != null) { OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(refreshTimeBeforeTokenExpiry.toMinutes()); fetchingTask.setNextFetchTime(nextFetchTime); } } private Mono<String> fetchFreshToken() { Mono<String> tokenAsync = refresher.get(); if (tokenAsync == null) { return FluxUtil.monoError(logger, new RuntimeException("get() function of the token refresher should not return null.")); } return tokenAsync; } private static class FetchingTask { private final CommunicationTokenCredential host; private Timer expiringTimer; private OffsetDateTime nextFetchTime; FetchingTask(CommunicationTokenCredential tokenHost, OffsetDateTime nextFetchAt) { host = tokenHost; nextFetchTime = nextFetchAt; startTimer(); } private synchronized void setNextFetchTime(OffsetDateTime newFetchTime) { nextFetchTime = newFetchTime; stopTimer(); startTimer(); } private synchronized void startTimer() { expiringTimer = new Timer(); Date expiring = 
Date.from(nextFetchTime.toInstant()); expiringTimer.schedule(new TokenExpiringTask(this), expiring); } private synchronized void stopTimer() { if (expiringTimer == null) { return; } expiringTimer.cancel(); expiringTimer.purge(); expiringTimer = null; } private Mono<String> fetchFreshToken() { return host.fetchFreshToken(); } private void setToken(String freshTokenString) { host.setToken(freshTokenString); } private class TokenExpiringTask extends TimerTask { private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class); private final FetchingTask tokenCache; TokenExpiringTask(FetchingTask host) { tokenCache = host; } @Override public void run() { try { Mono<String> tokenAsync = tokenCache.fetchFreshToken(); tokenCache.setToken(tokenAsync.block()); } catch (Exception exception) { logger.logExceptionAsError(new RuntimeException(exception)); } } } } }
class CommunicationTokenCredential implements AutoCloseable { private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10; private static final int DEFAULT_REFRESH_AFTER_TTL_DIVIDER = 2; private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class); private AccessToken accessToken; private final TokenParser tokenParser = new TokenParser(); private Supplier<Mono<String>> refresher; private FetchingTask fetchingTask; private boolean isClosed = false; /** * Create with serialized JWT token * * @param token serialized JWT token */ /** * Create with tokenRefreshOptions, which includes a token supplier and optional serialized JWT token. * If refresh proactively is true, callback function tokenRefresher will be called * ahead of the token expiry by the number of minutes specified by * CallbackOffsetMinutes defaulted to ten minutes. * * @param tokenRefreshOptions implementation to supply fresh token when reqested */ public CommunicationTokenCredential(CommunicationTokenRefreshOptions tokenRefreshOptions) { Supplier<String> tokenRefresher = tokenRefreshOptions.getTokenRefresherSync(); refresher = tokenRefresher != null ? () -> Mono.fromSupplier(tokenRefresher) : tokenRefreshOptions.getTokenRefresher(); Objects.requireNonNull(refresher, "'tokenRefresher' cannot be null."); if (tokenRefreshOptions.getInitialToken() != null) { setToken(tokenRefreshOptions.getInitialToken()); } if (tokenRefreshOptions.isRefreshProactively()) { scheduleRefresher(); } } private void scheduleRefresher() { OffsetDateTime nextFetchTime; if (isTokenExpired(accessToken)) { nextFetchTime = OffsetDateTime.now(); } else { OffsetDateTime now = OffsetDateTime.now(); long tokenTtlMs = accessToken.getExpiresAt().toInstant().toEpochMilli() - now.toInstant().toEpochMilli(); long nextFetchTimeMs = isTokenExpiringSoon() ? 
tokenTtlMs / DEFAULT_REFRESH_AFTER_TTL_DIVIDER : tokenTtlMs - TimeUnit.MILLISECONDS.convert(DEFAULT_EXPIRING_OFFSET_MINUTES, TimeUnit.MINUTES); nextFetchTime = now.plusNanos(TimeUnit.NANOSECONDS.convert(nextFetchTimeMs, TimeUnit.MILLISECONDS)); } fetchingTask = new FetchingTask(this, nextFetchTime); } private boolean isTokenExpired(AccessToken accessToken) { return accessToken == null || accessToken.isExpired(); } private boolean isTokenExpiringSoon() { return accessToken == null || OffsetDateTime.now().compareTo(accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES)) > 0; } /** * Get Azure core access token from credential * * @return Asynchronous call to fetch actual token */ public Mono<AccessToken> getToken() { if (isClosed) { return FluxUtil.monoError(logger, new RuntimeException("getToken called on closed CommunicationTokenCredential object")); } if (isTokenExpired(accessToken) && refresher != null) { synchronized (this) { if (isTokenExpired(accessToken) && refresher != null) { return fetchFreshToken() .flatMap(token -> { accessToken = tokenParser.parseJWTToken(token); if (isTokenExpired(accessToken)) { return FluxUtil.monoError(logger, new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } return Mono.just(accessToken); }); } } } return Mono.just(accessToken); } @Override public void close() throws IOException { isClosed = true; if (fetchingTask != null) { fetchingTask.stopTimer(); fetchingTask = null; } refresher = null; } boolean hasProactiveFetcher() { return fetchingTask != null; } private void setToken(String freshToken) { accessToken = tokenParser.parseJWTToken(freshToken); if (hasProactiveFetcher()) { scheduleRefresher(); } } private Mono<String> fetchFreshToken() { Mono<String> tokenAsync = refresher.get(); if (tokenAsync == null) { return FluxUtil.monoError(logger, new RuntimeException("get() function of the token refresher should not return null.")); } return tokenAsync; } private static class FetchingTask { private final CommunicationTokenCredential host; private Timer expiringTimer; private OffsetDateTime nextFetchTime; FetchingTask(CommunicationTokenCredential tokenHost, OffsetDateTime nextFetchAt) { host = tokenHost; nextFetchTime = nextFetchAt; stopTimer(); startTimer(); } private synchronized void startTimer() { expiringTimer = new Timer(); Date expiring = Date.from(nextFetchTime.toInstant()); expiringTimer.schedule(new TokenExpiringTask(this), expiring); } private synchronized void stopTimer() { if (expiringTimer == null) { return; } expiringTimer.cancel(); expiringTimer.purge(); expiringTimer = null; } private Mono<String> fetchFreshToken() { return host.fetchFreshToken(); } private void setToken(String freshTokenString) { host.setToken(freshTokenString); } private boolean isTokenExpired(String freshTokenString) { return host.tokenParser.parseJWTToken(freshTokenString).isExpired(); } private class TokenExpiringTask extends TimerTask { private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class); private final FetchingTask tokenCache; TokenExpiringTask(FetchingTask host) { tokenCache = host; } @Override public void run() { try { Mono<String> tokenAsync = tokenCache.fetchFreshToken(); tokenAsync.subscribe(token -> { if (!tokenCache.isTokenExpired(token)) { tokenCache.setToken(token); } else { logger.logExceptionAsError(new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } }); } catch (Exception exception) { logger.logExceptionAsError(new 
RuntimeException(exception)); } } } } }
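The ordering comment above boils down to a field-initialization rule: `setToken` reads `refreshTimeBeforeTokenExpiry`, so the constructor has to assign that field before calling the method, or the method sees the field's default value. A small sketch under that assumption (the class is illustrative, not the SDK credential):
```
import java.time.Duration;
import java.time.OffsetDateTime;

// Hedged sketch (not the SDK class): setToken() depends on refreshTimeBeforeTokenExpiry,
// so the constructor assigns that field first and calls setToken() second.
final class CredentialOrderingSketch {
    private final Duration refreshTimeBeforeTokenExpiry;
    private OffsetDateTime nextFetchTime;

    CredentialOrderingSketch(String token, Duration refreshTimeBeforeTokenExpiry) {
        // 1) establish the configuration the refresh math relies on
        this.refreshTimeBeforeTokenExpiry = refreshTimeBeforeTokenExpiry;
        // 2) only then process the token, which can now safely read the field above
        setToken(token);
    }

    private void setToken(String token) {
        // Stand-in for JWT parsing: pretend the token expires in an hour and
        // schedule the next fetch refreshTimeBeforeTokenExpiry before that.
        OffsetDateTime expiresAt = OffsetDateTime.now().plusHours(1);
        nextFetchTime = expiresAt.minus(refreshTimeBeforeTokenExpiry);
    }
}
```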
Why is it sometimes `refreshTimeBeforeTokenExpiry` and sometimes `this.refreshTimeBeforeTokenExpiry`? Can we unify it?
public CommunicationTokenCredential(CommunicationTokenRefreshOptions tokenRefreshOptions) { Supplier<Mono<String>> tokenRefresher = tokenRefreshOptions.getTokenRefresher(); Objects.requireNonNull(tokenRefresher, "'tokenRefresher' cannot be null."); this.refreshTimeBeforeTokenExpiry = tokenRefreshOptions.getRefreshTimeBeforeTokenExpiry(); refresher = tokenRefresher; if (tokenRefreshOptions.getInitialToken() != null) { setToken(tokenRefreshOptions.getInitialToken()); if (tokenRefreshOptions.isRefreshProactively()) { OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(refreshTimeBeforeTokenExpiry.toMinutes()); fetchingTask = new FetchingTask(this, nextFetchTime); } } }
this.refreshTimeBeforeTokenExpiry = tokenRefreshOptions.getRefreshTimeBeforeTokenExpiry();
public CommunicationTokenCredential(CommunicationTokenRefreshOptions tokenRefreshOptions) { Supplier<String> tokenRefresher = tokenRefreshOptions.getTokenRefresherSync(); refresher = tokenRefresher != null ? () -> Mono.fromSupplier(tokenRefresher) : tokenRefreshOptions.getTokenRefresher(); Objects.requireNonNull(refresher, "'tokenRefresher' cannot be null."); if (tokenRefreshOptions.getInitialToken() != null) { setToken(tokenRefreshOptions.getInitialToken()); } if (tokenRefreshOptions.isRefreshProactively()) { scheduleRefresher(); } }
class CommunicationTokenCredential implements AutoCloseable { private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class); private final Duration refreshTimeBeforeTokenExpiry; private AccessToken accessToken; private final TokenParser tokenParser = new TokenParser(); private Supplier<Mono<String>> refresher; private FetchingTask fetchingTask; private boolean isClosed = false; /** * Create with serialized JWT token * * @param token serialized JWT token */ public CommunicationTokenCredential(String token) { Objects.requireNonNull(token, "'token' cannot be null."); setToken(token); CommunicationTokenRefreshOptions tokenRefreshOptions = new CommunicationTokenRefreshOptions(null, false, token); refreshTimeBeforeTokenExpiry = tokenRefreshOptions.getRefreshTimeBeforeTokenExpiry(); } /** * Create with tokenRefreshOptions, which includes a token supplier and optional serialized JWT token. * If refresh proactively is true, callback function tokenRefresher will be called * ahead of the token expiry by the number of minutes specified by * CallbackOffsetMinutes defaulted to ten minutes. To modify this default, call * setCallbackOffsetMinutes after construction * * @param tokenRefreshOptions implementation to supply fresh token when reqested */ /** * Get Azure core access token from credential * * @return Asynchronous call to fetch actual token */ public Mono<AccessToken> getToken() { if (isClosed) { return FluxUtil.monoError(logger, new RuntimeException("getToken called on closed CommunicationTokenCredential object")); } if ((accessToken == null || accessToken.isExpired()) && refresher != null) { synchronized (this) { if ((accessToken == null || accessToken.isExpired()) && refresher != null) { return fetchFreshToken() .map(token -> { accessToken = tokenParser.parseJWTToken(token); return accessToken; }); } } } return Mono.just(accessToken); } @Override public void close() throws IOException { isClosed = true; if (fetchingTask != null) { fetchingTask.stopTimer(); fetchingTask = null; } refresher = null; } boolean hasProactiveFetcher() { return fetchingTask != null; } private void setToken(String freshToken) { accessToken = tokenParser.parseJWTToken(freshToken); if (fetchingTask != null) { OffsetDateTime nextFetchTime = accessToken.getExpiresAt().minusMinutes(refreshTimeBeforeTokenExpiry.toMinutes()); fetchingTask.setNextFetchTime(nextFetchTime); } } private Mono<String> fetchFreshToken() { Mono<String> tokenAsync = refresher.get(); if (tokenAsync == null) { return FluxUtil.monoError(logger, new RuntimeException("get() function of the token refresher should not return null.")); } return tokenAsync; } private static class FetchingTask { private final CommunicationTokenCredential host; private Timer expiringTimer; private OffsetDateTime nextFetchTime; FetchingTask(CommunicationTokenCredential tokenHost, OffsetDateTime nextFetchAt) { host = tokenHost; nextFetchTime = nextFetchAt; startTimer(); } private synchronized void setNextFetchTime(OffsetDateTime newFetchTime) { nextFetchTime = newFetchTime; stopTimer(); startTimer(); } private synchronized void startTimer() { expiringTimer = new Timer(); Date expiring = Date.from(nextFetchTime.toInstant()); expiringTimer.schedule(new TokenExpiringTask(this), expiring); } private synchronized void stopTimer() { if (expiringTimer == null) { return; } expiringTimer.cancel(); expiringTimer.purge(); expiringTimer = null; } private Mono<String> fetchFreshToken() { return host.fetchFreshToken(); } private void setToken(String freshTokenString) { 
host.setToken(freshTokenString); } private class TokenExpiringTask extends TimerTask { private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class); private final FetchingTask tokenCache; TokenExpiringTask(FetchingTask host) { tokenCache = host; } @Override public void run() { try { Mono<String> tokenAsync = tokenCache.fetchFreshToken(); tokenCache.setToken(tokenAsync.block()); } catch (Exception exception) { logger.logExceptionAsError(new RuntimeException(exception)); } } } } }
class CommunicationTokenCredential implements AutoCloseable { private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10; private static final int DEFAULT_REFRESH_AFTER_TTL_DIVIDER = 2; private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class); private AccessToken accessToken; private final TokenParser tokenParser = new TokenParser(); private Supplier<Mono<String>> refresher; private FetchingTask fetchingTask; private boolean isClosed = false; /** * Create with serialized JWT token * * @param token serialized JWT token */ public CommunicationTokenCredential(String token) { Objects.requireNonNull(token, "'token' cannot be null."); setToken(token); } /** * Create with tokenRefreshOptions, which includes a token supplier and optional serialized JWT token. * If refresh proactively is true, callback function tokenRefresher will be called * ahead of the token expiry by the number of minutes specified by * CallbackOffsetMinutes defaulted to ten minutes. * * @param tokenRefreshOptions implementation to supply fresh token when reqested */ private void scheduleRefresher() { OffsetDateTime nextFetchTime; if (isTokenExpired(accessToken)) { nextFetchTime = OffsetDateTime.now(); } else { OffsetDateTime now = OffsetDateTime.now(); long tokenTtlMs = accessToken.getExpiresAt().toInstant().toEpochMilli() - now.toInstant().toEpochMilli(); long nextFetchTimeMs = isTokenExpiringSoon() ? tokenTtlMs / DEFAULT_REFRESH_AFTER_TTL_DIVIDER : tokenTtlMs - TimeUnit.MILLISECONDS.convert(DEFAULT_EXPIRING_OFFSET_MINUTES, TimeUnit.MINUTES); nextFetchTime = now.plusNanos(TimeUnit.NANOSECONDS.convert(nextFetchTimeMs, TimeUnit.MILLISECONDS)); } fetchingTask = new FetchingTask(this, nextFetchTime); } private boolean isTokenExpired(AccessToken accessToken) { return accessToken == null || accessToken.isExpired(); } private boolean isTokenExpiringSoon() { return accessToken == null || OffsetDateTime.now().compareTo(accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES)) > 0; } /** * Get Azure core access token from credential * * @return Asynchronous call to fetch actual token */ public Mono<AccessToken> getToken() { if (isClosed) { return FluxUtil.monoError(logger, new RuntimeException("getToken called on closed CommunicationTokenCredential object")); } if (isTokenExpired(accessToken) && refresher != null) { synchronized (this) { if (isTokenExpired(accessToken) && refresher != null) { return fetchFreshToken() .flatMap(token -> { accessToken = tokenParser.parseJWTToken(token); if (isTokenExpired(accessToken)) { return FluxUtil.monoError(logger, new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } return Mono.just(accessToken); }); } } } return Mono.just(accessToken); } @Override public void close() throws IOException { isClosed = true; if (fetchingTask != null) { fetchingTask.stopTimer(); fetchingTask = null; } refresher = null; } boolean hasProactiveFetcher() { return fetchingTask != null; } private void setToken(String freshToken) { accessToken = tokenParser.parseJWTToken(freshToken); if (hasProactiveFetcher()) { scheduleRefresher(); } } private Mono<String> fetchFreshToken() { Mono<String> tokenAsync = refresher.get(); if (tokenAsync == null) { return FluxUtil.monoError(logger, new RuntimeException("get() function of the token refresher should not return null.")); } return tokenAsync; } private static class FetchingTask { private final CommunicationTokenCredential host; private Timer expiringTimer; private OffsetDateTime nextFetchTime; 
FetchingTask(CommunicationTokenCredential tokenHost, OffsetDateTime nextFetchAt) { host = tokenHost; nextFetchTime = nextFetchAt; stopTimer(); startTimer(); } private synchronized void startTimer() { expiringTimer = new Timer(); Date expiring = Date.from(nextFetchTime.toInstant()); expiringTimer.schedule(new TokenExpiringTask(this), expiring); } private synchronized void stopTimer() { if (expiringTimer == null) { return; } expiringTimer.cancel(); expiringTimer.purge(); expiringTimer = null; } private Mono<String> fetchFreshToken() { return host.fetchFreshToken(); } private void setToken(String freshTokenString) { host.setToken(freshTokenString); } private boolean isTokenExpired(String freshTokenString) { return host.tokenParser.parseJWTToken(freshTokenString).isExpired(); } private class TokenExpiringTask extends TimerTask { private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class); private final FetchingTask tokenCache; TokenExpiringTask(FetchingTask host) { tokenCache = host; } @Override public void run() { try { Mono<String> tokenAsync = tokenCache.fetchFreshToken(); tokenAsync.subscribe(token -> { if (!tokenCache.isTokenExpired(token)) { tokenCache.setToken(token); } else { logger.logExceptionAsError(new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } }); } catch (Exception exception) { logger.logExceptionAsError(new RuntimeException(exception)); } } } } }
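The `this.` question above is stylistic. One common convention (an assumption here, not an SDK rule) is to write `this.` only where a constructor or setter parameter shadows the field, and the bare field name everywhere else:
```
import java.time.Duration;

// Illustrative convention only: `this.` where a parameter shadows the field, bare name otherwise.
final class PrefixConventionSketch {
    private final Duration refreshTimeBeforeTokenExpiry;
    private boolean refreshScheduled;

    PrefixConventionSketch(Duration refreshTimeBeforeTokenExpiry) {
        // The parameter shadows the field, so `this.` is required on the left-hand side.
        this.refreshTimeBeforeTokenExpiry = refreshTimeBeforeTokenExpiry;
        // No shadowing here, so the bare field name is unambiguous.
        refreshScheduled = false;
    }

    Duration refreshTime() {
        return refreshTimeBeforeTokenExpiry;
    }
}
```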
Is blocking necessary here? Would subscribing to the mono and handling the fresh token there be more "reactive"? Something like
```
tokenAsync.subscribe(token -> {
    if (!tokenCache.isTokenExpired(token)) {
        tokenCache.setToken(token);
    } else {
        logger.logExceptionAsError(new IllegalArgumentException("The token returned from the tokenRefresher is expired."));
    }
});
```
public void run() { try { Mono<String> tokenAsync = tokenCache.fetchFreshToken(); String freshTokenString = tokenAsync.block(); if (!tokenCache.isTokenExpired(freshTokenString)) { tokenCache.setToken(freshTokenString); } else { logger.logExceptionAsError(new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } } catch (Exception exception) { logger.logExceptionAsError(new RuntimeException(exception)); } }
String freshTokenString = tokenAsync.block();
public void run() { try { Mono<String> tokenAsync = tokenCache.fetchFreshToken(); tokenAsync.subscribe(token -> { if (!tokenCache.isTokenExpired(token)) { tokenCache.setToken(token); } else { logger.logExceptionAsError(new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } }); } catch (Exception exception) { logger.logExceptionAsError(new RuntimeException(exception)); } }
class TokenExpiringTask extends TimerTask { private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class); private final FetchingTask tokenCache; TokenExpiringTask(FetchingTask host) { tokenCache = host; } @Override }
class TokenExpiringTask extends TimerTask { private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class); private final FetchingTask tokenCache; TokenExpiringTask(FetchingTask host) { tokenCache = host; } @Override }
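The review comment above is a Project Reactor style point: `block()` parks the timer thread until the refresher finishes, while `subscribe(...)` registers callbacks and returns immediately. A stand-alone, hedged sketch of the non-blocking variant (the refresher, expiry check, and cache are stand-ins for the real JWT parsing and logging):
```
import java.util.function.Supplier;

import reactor.core.publisher.Mono;

// Sketch only: consume the refreshed token via subscribe() instead of block(), so the
// scheduling thread is not held while the refresher runs. Names are illustrative.
final class RefreshTaskSketch {
    private final Supplier<Mono<String>> refresher;
    private volatile String cachedToken;

    RefreshTaskSketch(Supplier<Mono<String>> refresher) {
        this.refresher = refresher;
    }

    void run() {
        refresher.get().subscribe(
            token -> {
                if (!isExpired(token)) {
                    cachedToken = token;  // happy path: cache the fresh token
                } else {
                    System.err.println("refresher returned an expired token");
                }
            },
            error -> System.err.println("token refresh failed: " + error));
    }

    // Stand-in expiry check; the real code would inspect the JWT's exp claim.
    private boolean isExpired(String token) {
        return token == null || token.isEmpty();
    }
}
```
Error handling moves into the second `subscribe` argument, which takes over the role of the try/catch that wraps `block()` in the original task.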
- `now.plusNanos` can be extracted one level up (less verbose + less code to maintain)
- let's extract `2` into a constant (similarly to [js](https://github.com/Azure/azure-sdk-for-js/blob/22acb7535a6c15f6003bbbc4d4a04dafd09ccbeb/sdk/communication/communication-common/src/autoRefreshTokenCredential.ts#L33))
private void scheduleRefresher() { OffsetDateTime nextFetchTime; if (isTokenExpired(accessToken)) { nextFetchTime = OffsetDateTime.now(); } else { OffsetDateTime now = OffsetDateTime.now(); long tokenTtlMs = accessToken.getExpiresAt().toInstant().toEpochMilli() - now.toInstant().toEpochMilli(); nextFetchTime = isTokenExpiringSoon() ? now.plusNanos(TimeUnit.NANOSECONDS.convert(tokenTtlMs / 2, TimeUnit.MILLISECONDS)) : now.plusNanos(TimeUnit.NANOSECONDS.convert(tokenTtlMs, TimeUnit.MILLISECONDS) - TimeUnit.NANOSECONDS.convert(DEFAULT_EXPIRING_OFFSET_MINUTES, TimeUnit.MINUTES)); } fetchingTask = new FetchingTask(this, nextFetchTime); }
? now.plusNanos(TimeUnit.NANOSECONDS.convert(tokenTtlMs / 2, TimeUnit.MILLISECONDS))
private void scheduleRefresher() { OffsetDateTime nextFetchTime; if (isTokenExpired(accessToken)) { nextFetchTime = OffsetDateTime.now(); } else { OffsetDateTime now = OffsetDateTime.now(); long tokenTtlMs = accessToken.getExpiresAt().toInstant().toEpochMilli() - now.toInstant().toEpochMilli(); long nextFetchTimeMs = isTokenExpiringSoon() ? tokenTtlMs / DEFAULT_REFRESH_AFTER_TTL_DIVIDER : tokenTtlMs - TimeUnit.MILLISECONDS.convert(DEFAULT_EXPIRING_OFFSET_MINUTES, TimeUnit.MINUTES); nextFetchTime = now.plusNanos(TimeUnit.NANOSECONDS.convert(nextFetchTimeMs, TimeUnit.MILLISECONDS)); } fetchingTask = new FetchingTask(this, nextFetchTime); }
class CommunicationTokenCredential implements AutoCloseable { private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10; private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class); private AccessToken accessToken; private final TokenParser tokenParser = new TokenParser(); private Supplier<Mono<String>> refresher; private FetchingTask fetchingTask; private boolean isClosed = false; /** * Create with serialized JWT token * * @param token serialized JWT token */ public CommunicationTokenCredential(String token) { Objects.requireNonNull(token, "'token' cannot be null."); setToken(token); } /** * Create with tokenRefreshOptions, which includes a token supplier and optional serialized JWT token. * If refresh proactively is true, callback function tokenRefresher will be called * ahead of the token expiry by the number of minutes specified by * CallbackOffsetMinutes defaulted to ten minutes. * * @param tokenRefreshOptions implementation to supply fresh token when reqested */ public CommunicationTokenCredential(CommunicationTokenRefreshOptions tokenRefreshOptions) { Supplier<String> tokenRefresher = tokenRefreshOptions.getTokenRefresherSync(); refresher = tokenRefresher != null ? () -> Mono.fromSupplier(tokenRefresher) : tokenRefreshOptions.getTokenRefresher(); Objects.requireNonNull(refresher, "'tokenRefresher' cannot be null."); if (tokenRefreshOptions.getInitialToken() != null) { setToken(tokenRefreshOptions.getInitialToken()); } if (tokenRefreshOptions.isRefreshProactively()) { scheduleRefresher(); } } private boolean isTokenExpired(AccessToken accessToken) { return accessToken == null || accessToken.isExpired(); } private boolean isTokenExpiringSoon() { return accessToken == null || OffsetDateTime.now().compareTo(accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES)) > 0; } /** * Get Azure core access token from credential * * @return Synchronous call to fetch actual token */ public AccessToken getTokenSync() { return getToken().block(); } /** * Get Azure core access token from credential * * @return Asynchronous call to fetch actual token */ public Mono<AccessToken> getToken() { if (isClosed) { return FluxUtil.monoError(logger, new RuntimeException("getToken called on closed CommunicationTokenCredential object")); } if (isTokenExpired(accessToken) && refresher != null) { synchronized (this) { if (isTokenExpired(accessToken) && refresher != null) { return fetchFreshToken() .flatMap(token -> { accessToken = tokenParser.parseJWTToken(token); if (isTokenExpired(accessToken)) { return FluxUtil.monoError(logger, new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } return Mono.just(accessToken); }); } } } return Mono.just(accessToken); } @Override public void close() throws IOException { isClosed = true; if (fetchingTask != null) { fetchingTask.stopTimer(); fetchingTask = null; } refresher = null; } boolean hasProactiveFetcher() { return fetchingTask != null; } private void setToken(String freshToken) { accessToken = tokenParser.parseJWTToken(freshToken); if (hasProactiveFetcher()) { scheduleRefresher(); } } private Mono<String> fetchFreshToken() { Mono<String> tokenAsync = refresher.get(); if (tokenAsync == null) { return FluxUtil.monoError(logger, new RuntimeException("get() function of the token refresher should not return null.")); } return tokenAsync; } private static class FetchingTask { private final CommunicationTokenCredential host; private Timer expiringTimer; private OffsetDateTime nextFetchTime; 
FetchingTask(CommunicationTokenCredential tokenHost, OffsetDateTime nextFetchAt) { host = tokenHost; nextFetchTime = nextFetchAt; stopTimer(); startTimer(); } private synchronized void startTimer() { expiringTimer = new Timer(); Date expiring = Date.from(nextFetchTime.toInstant()); expiringTimer.schedule(new TokenExpiringTask(this), expiring); } private synchronized void stopTimer() { if (expiringTimer == null) { return; } expiringTimer.cancel(); expiringTimer.purge(); expiringTimer = null; } private Mono<String> fetchFreshToken() { return host.fetchFreshToken(); } private void setToken(String freshTokenString) { host.setToken(freshTokenString); } private boolean isTokenExpired(String freshTokenString) { return host.tokenParser.parseJWTToken(freshTokenString).isExpired(); } private class TokenExpiringTask extends TimerTask { private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class); private final FetchingTask tokenCache; TokenExpiringTask(FetchingTask host) { tokenCache = host; } @Override public void run() { try { Mono<String> tokenAsync = tokenCache.fetchFreshToken(); tokenAsync.subscribe(token -> { if (!tokenCache.isTokenExpired(token)) { tokenCache.setToken(token); } else { logger.logExceptionAsError(new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } }); } catch (Exception exception) { logger.logExceptionAsError(new RuntimeException(exception)); } } } } }
class CommunicationTokenCredential implements AutoCloseable { private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10; private static final int DEFAULT_REFRESH_AFTER_TTL_DIVIDER = 2; private final ClientLogger logger = new ClientLogger(CommunicationTokenCredential.class); private AccessToken accessToken; private final TokenParser tokenParser = new TokenParser(); private Supplier<Mono<String>> refresher; private FetchingTask fetchingTask; private boolean isClosed = false; /** * Create with serialized JWT token * * @param token serialized JWT token */ public CommunicationTokenCredential(String token) { Objects.requireNonNull(token, "'token' cannot be null."); setToken(token); } /** * Create with tokenRefreshOptions, which includes a token supplier and optional serialized JWT token. * If refresh proactively is true, callback function tokenRefresher will be called * ahead of the token expiry by the number of minutes specified by * CallbackOffsetMinutes defaulted to ten minutes. * * @param tokenRefreshOptions implementation to supply fresh token when reqested */ public CommunicationTokenCredential(CommunicationTokenRefreshOptions tokenRefreshOptions) { Supplier<String> tokenRefresher = tokenRefreshOptions.getTokenRefresherSync(); refresher = tokenRefresher != null ? () -> Mono.fromSupplier(tokenRefresher) : tokenRefreshOptions.getTokenRefresher(); Objects.requireNonNull(refresher, "'tokenRefresher' cannot be null."); if (tokenRefreshOptions.getInitialToken() != null) { setToken(tokenRefreshOptions.getInitialToken()); } if (tokenRefreshOptions.isRefreshProactively()) { scheduleRefresher(); } } private boolean isTokenExpired(AccessToken accessToken) { return accessToken == null || accessToken.isExpired(); } private boolean isTokenExpiringSoon() { return accessToken == null || OffsetDateTime.now().compareTo(accessToken.getExpiresAt().minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES)) > 0; } /** * Get Azure core access token from credential * * @return Asynchronous call to fetch actual token */ public Mono<AccessToken> getToken() { if (isClosed) { return FluxUtil.monoError(logger, new RuntimeException("getToken called on closed CommunicationTokenCredential object")); } if (isTokenExpired(accessToken) && refresher != null) { synchronized (this) { if (isTokenExpired(accessToken) && refresher != null) { return fetchFreshToken() .flatMap(token -> { accessToken = tokenParser.parseJWTToken(token); if (isTokenExpired(accessToken)) { return FluxUtil.monoError(logger, new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } return Mono.just(accessToken); }); } } } return Mono.just(accessToken); } @Override public void close() throws IOException { isClosed = true; if (fetchingTask != null) { fetchingTask.stopTimer(); fetchingTask = null; } refresher = null; } boolean hasProactiveFetcher() { return fetchingTask != null; } private void setToken(String freshToken) { accessToken = tokenParser.parseJWTToken(freshToken); if (hasProactiveFetcher()) { scheduleRefresher(); } } private Mono<String> fetchFreshToken() { Mono<String> tokenAsync = refresher.get(); if (tokenAsync == null) { return FluxUtil.monoError(logger, new RuntimeException("get() function of the token refresher should not return null.")); } return tokenAsync; } private static class FetchingTask { private final CommunicationTokenCredential host; private Timer expiringTimer; private OffsetDateTime nextFetchTime; FetchingTask(CommunicationTokenCredential tokenHost, OffsetDateTime nextFetchAt) { host = tokenHost; 
nextFetchTime = nextFetchAt; stopTimer(); startTimer(); } private synchronized void startTimer() { expiringTimer = new Timer(); Date expiring = Date.from(nextFetchTime.toInstant()); expiringTimer.schedule(new TokenExpiringTask(this), expiring); } private synchronized void stopTimer() { if (expiringTimer == null) { return; } expiringTimer.cancel(); expiringTimer.purge(); expiringTimer = null; } private Mono<String> fetchFreshToken() { return host.fetchFreshToken(); } private void setToken(String freshTokenString) { host.setToken(freshTokenString); } private boolean isTokenExpired(String freshTokenString) { return host.tokenParser.parseJWTToken(freshTokenString).isExpired(); } private class TokenExpiringTask extends TimerTask { private final ClientLogger logger = new ClientLogger(TokenExpiringTask.class); private final FetchingTask tokenCache; TokenExpiringTask(FetchingTask host) { tokenCache = host; } @Override public void run() { try { Mono<String> tokenAsync = tokenCache.fetchFreshToken(); tokenAsync.subscribe(token -> { if (!tokenCache.isTokenExpired(token)) { tokenCache.setToken(token); } else { logger.logExceptionAsError(new IllegalArgumentException("The token returned from the tokenRefresher is expired.")); } }); } catch (Exception exception) { logger.logExceptionAsError(new RuntimeException(exception)); } } } } }
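The scheduleRefresher change above adds DEFAULT_REFRESH_AFTER_TTL_DIVIDER and splits the next-fetch calculation into two cases. A minimal, self-contained sketch of that scheduling math is below, assuming only java.time and TimeUnit; the standalone class and method names are illustrative, not the SDK's API.

```java
import java.time.OffsetDateTime;
import java.util.concurrent.TimeUnit;

public final class RefreshScheduleSketch {
    // Mirrors the constants used in the snippet above (illustrative values).
    private static final int DEFAULT_EXPIRING_OFFSET_MINUTES = 10;
    private static final int DEFAULT_REFRESH_AFTER_TTL_DIVIDER = 2;

    /**
     * Computes when the next token fetch should run:
     * - expired token            -> fetch now
     * - expiring within 10 min   -> fetch after half of the remaining TTL
     * - otherwise                -> fetch 10 minutes before expiry
     */
    static OffsetDateTime nextFetchTime(OffsetDateTime now, OffsetDateTime expiresAt) {
        if (!expiresAt.isAfter(now)) {
            return now; // already expired, refresh immediately
        }
        long tokenTtlMs = expiresAt.toInstant().toEpochMilli() - now.toInstant().toEpochMilli();
        boolean expiringSoon = now.isAfter(expiresAt.minusMinutes(DEFAULT_EXPIRING_OFFSET_MINUTES));
        long nextFetchTimeMs = expiringSoon
            ? tokenTtlMs / DEFAULT_REFRESH_AFTER_TTL_DIVIDER
            : tokenTtlMs - TimeUnit.MILLISECONDS.convert(DEFAULT_EXPIRING_OFFSET_MINUTES, TimeUnit.MINUTES);
        return now.plusNanos(TimeUnit.NANOSECONDS.convert(nextFetchTimeMs, TimeUnit.MILLISECONDS));
    }

    public static void main(String[] args) {
        OffsetDateTime now = OffsetDateTime.now();
        // Token with 60 minutes left: next fetch lands about 50 minutes from now.
        System.out.println(nextFetchTime(now, now.plusMinutes(60)));
        // Token with 5 minutes left: next fetch lands about 2.5 minutes from now.
        System.out.println(nextFetchTime(now, now.plusMinutes(5)));
    }
}
```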
instanceof the HttpProxy interface instead of the concrete type?
public ProxyOptions convert(ProxyAware.Proxy proxy) { if (!StringUtils.hasText(proxy.getHostname()) || proxy.getPort() == null) { LOGGER.debug("Proxy hostname or port is not set."); return null; } final String type = proxy.getType(); ProxyOptions.Type sdkProxyType; if ("http".equalsIgnoreCase(type)) { sdkProxyType = ProxyOptions.Type.HTTP; } else { sdkProxyType = ProxyOptions.Type.SOCKS4; } ProxyOptions proxyOptions = new ProxyOptions(sdkProxyType, new InetSocketAddress(proxy.getHostname(), proxy.getPort())); if (StringUtils.hasText(proxy.getUsername()) && StringUtils.hasText(proxy.getPassword())) { proxyOptions.setCredentials(proxy.getUsername(), proxy.getPassword()); } if (proxy instanceof HttpProxyProperties) { HttpProxyProperties httpProxyProperties = (HttpProxyProperties) proxy; if (StringUtils.hasText(httpProxyProperties.getNonProxyHosts())) { proxyOptions.setNonProxyHosts(httpProxyProperties.getNonProxyHosts()); } } return proxyOptions; }
if (proxy instanceof HttpProxyProperties) {
public ProxyOptions convert(ProxyAware.Proxy proxy) { if (!StringUtils.hasText(proxy.getHostname()) || proxy.getPort() == null) { LOGGER.debug("Proxy hostname or port is not set."); return null; } final String type = proxy.getType(); ProxyOptions.Type sdkProxyType; if ("http".equalsIgnoreCase(type)) { sdkProxyType = ProxyOptions.Type.HTTP; } else { sdkProxyType = ProxyOptions.Type.SOCKS4; } ProxyOptions proxyOptions = new ProxyOptions(sdkProxyType, new InetSocketAddress(proxy.getHostname(), proxy.getPort())); if (StringUtils.hasText(proxy.getUsername()) && StringUtils.hasText(proxy.getPassword())) { proxyOptions.setCredentials(proxy.getUsername(), proxy.getPassword()); } if (proxy instanceof ProxyAware.HttpProxy) { ProxyAware.HttpProxy httpProxyProperties = (ProxyAware.HttpProxy) proxy; if (StringUtils.hasText(httpProxyProperties.getNonProxyHosts())) { proxyOptions.setNonProxyHosts(httpProxyProperties.getNonProxyHosts()); } } return proxyOptions; }
class AzureHttpProxyOptionsConverter implements Converter<ProxyAware.Proxy, ProxyOptions> { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpProxyOptionsConverter.class); public static final AzureHttpProxyOptionsConverter HTTP_PROXY_CONVERTER = new AzureHttpProxyOptionsConverter(); @Override }
class AzureHttpProxyOptionsConverter implements Converter<ProxyAware.Proxy, ProxyOptions> { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpProxyOptionsConverter.class); public static final AzureHttpProxyOptionsConverter HTTP_PROXY_CONVERTER = new AzureHttpProxyOptionsConverter(); @Override }
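The fix above switches the instanceof check from the concrete HttpProxyProperties class to the ProxyAware.HttpProxy interface, so any HTTP-proxy implementation gets its non-proxy-hosts copied. A hedged sketch of that difference, using hypothetical stand-in types rather than the library's own:

```java
// Illustrative types only -- they stand in for ProxyAware.Proxy / ProxyAware.HttpProxy
// and a concrete properties class; the names are hypothetical, not the library's API.
interface Proxy { String getHostname(); }
interface HttpProxy extends Proxy { String getNonProxyHosts(); }

class HttpProxyProperties implements HttpProxy {
    public String getHostname() { return "proxy.local"; }
    public String getNonProxyHosts() { return "localhost|*.internal"; }
}

class OtherHttpProxyImpl implements HttpProxy {
    public String getHostname() { return "proxy.other"; }
    public String getNonProxyHosts() { return "127.0.0.1"; }
}

public class InstanceofDemo {
    // Checking against the interface covers every implementation of the contract.
    static String nonProxyHosts(Proxy proxy) {
        if (proxy instanceof HttpProxy) {
            return ((HttpProxy) proxy).getNonProxyHosts();
        }
        return null; // SOCKS or other proxy kinds carry no non-proxy-host list
    }

    public static void main(String[] args) {
        System.out.println(nonProxyHosts(new HttpProxyProperties())); // localhost|*.internal
        System.out.println(nonProxyHosts(new OtherHttpProxyImpl()));  // 127.0.0.1 -- this case is
                                                                      // missed by an instanceof
                                                                      // check on the concrete class
    }
}
```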
nit: Do we need to check again that the connection is not null? line#172 seems to be doing it already.
public void onTransportError(Event event) { ErrorCondition condition = event.getTransport().getCondition(); if (condition != null) { TRACE_LOGGER.info("Connection.onTransportError: hostname:{}, error:{}", event.getConnection().getHostname(), condition.getDescription()); } else { TRACE_LOGGER.info("Connection.onTransportError: hostname:{}. error:{}", event.getConnection().getHostname(), "no description returned"); } this.messagingFactory.onConnectionError(condition); Connection connection = event.getConnection(); if (connection != null) { if (connection.getTransport() != null) { connection.getTransport().unbind(); } if (connection != null) { connection.free(); } } this.notifyTransportErrors(event); }
connection.free();
public void onTransportError(Event event) { ErrorCondition condition = event.getTransport().getCondition(); if (condition != null) { TRACE_LOGGER.info("Connection.onTransportError: hostname:{}, error:{}", event.getConnection().getHostname(), condition.getDescription()); } else { TRACE_LOGGER.info("Connection.onTransportError: hostname:{}. error:{}", event.getConnection().getHostname(), "no description returned"); } this.messagingFactory.onConnectionError(condition); Connection connection = event.getConnection(); if (connection != null) { if (connection.getTransport() != null) { connection.getTransport().unbind(); } connection.free(); } this.notifyTransportErrors(event); }
class ConnectionHandler extends BaseHandler { private static final SslDomain.VerifyMode VERIFY_MODE; private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ConnectionHandler.class); protected final IAmqpConnection messagingFactory; static { String verifyModePropValue = System.getProperty(ClientConstants.SSL_VERIFY_MODE_PROPERTY_NAME); if (ClientConstants.SSL_VERIFY_MODE_ANONYMOUS.equalsIgnoreCase(verifyModePropValue)) { VERIFY_MODE = SslDomain.VerifyMode.ANONYMOUS_PEER; } else if (ClientConstants.SSL_VERIFY_MODE_CERTONLY.equalsIgnoreCase(verifyModePropValue)) { VERIFY_MODE = SslDomain.VerifyMode.VERIFY_PEER; } else { VERIFY_MODE = SslDomain.VerifyMode.VERIFY_PEER_NAME; } } protected ConnectionHandler(final IAmqpConnection messagingFactory) { add(new Handshaker()); this.messagingFactory = messagingFactory; } public static ConnectionHandler create(TransportType transportType, IAmqpConnection messagingFactory) { switch (transportType) { case AMQP_WEB_SOCKETS: if (ProxyConnectionHandler.shouldUseProxy(messagingFactory.getHostName())) { return new ProxyConnectionHandler(messagingFactory); } else { return new WebSocketConnectionHandler(messagingFactory); } case AMQP: default: return new ConnectionHandler(messagingFactory); } } @Override public void onConnectionInit(Event event) { final Connection connection = event.getConnection(); final String hostName = new StringBuilder(messagingFactory.getHostName()) .append(":") .append(String.valueOf(this.getProtocolPort())) .toString(); TRACE_LOGGER.debug("onConnectionInit: hostname:{}", hostName); connection.setHostname(hostName); connection.setContainer(StringUtil.getShortRandomString()); final Map<Symbol, Object> connectionProperties = new HashMap<Symbol, Object>(); connectionProperties.put(AmqpConstants.PRODUCT, ClientConstants.PRODUCT_NAME); connectionProperties.put(AmqpConstants.VERSION, ClientConstants.CURRENT_JAVACLIENT_VERSION); connectionProperties.put(AmqpConstants.PLATFORM, ClientConstants.PLATFORM_INFO); connection.setProperties(connectionProperties); connection.open(); } protected IAmqpConnection getMessagingFactory() { return this.messagingFactory; } public void addTransportLayers(final Event event, final TransportInternal transport) { SslDomain domain = Proton.sslDomain(); domain.init(SslDomain.Mode.CLIENT); if (VERIFY_MODE == SslDomain.VerifyMode.VERIFY_PEER_NAME) { try { SSLContext defaultContext = SSLContext.getDefault(); StrictTLSContextSpi strictTlsContextSpi = new StrictTLSContextSpi(defaultContext); SSLContext strictTlsContext = new StrictTLSContext(strictTlsContextSpi, defaultContext.getProvider(), defaultContext.getProtocol()); domain.setSslContext(strictTlsContext); domain.setPeerAuthentication(SslDomain.VerifyMode.VERIFY_PEER_NAME); SslPeerDetails peerDetails = Proton.sslPeerDetails(this.messagingFactory.getHostName(), this.getProtocolPort()); transport.ssl(domain, peerDetails); } catch (NoSuchAlgorithmException e) { TRACE_LOGGER.info("Default SSL algorithm not found in JRE. Please check your JRE setup.", e); } } else if (VERIFY_MODE == SslDomain.VerifyMode.VERIFY_PEER) { try { SSLContext defaultContext = SSLContext.getDefault(); domain.setSslContext(defaultContext); domain.setPeerAuthentication(SslDomain.VerifyMode.VERIFY_PEER); transport.ssl(domain); } catch (NoSuchAlgorithmException e) { TRACE_LOGGER.info("Default SSL algorithm not found in JRE. 
Please check your JRE setup.", e); } } else { domain.setPeerAuthentication(SslDomain.VerifyMode.ANONYMOUS_PEER); transport.ssl(domain); } } protected void notifyTransportErrors(final Event event) { /* no-op */ } public String getOutboundSocketHostName() { return messagingFactory.getHostName(); } public int getOutboundSocketPort() { return this.getProtocolPort(); } public int getProtocolPort() { return ClientConstants.AMQPS_PORT; } public int getMaxFrameSize() { return AmqpConstants.MAX_FRAME_SIZE; } @Override public void onConnectionBound(Event event) { TRACE_LOGGER.debug("onConnectionBound: hostname:{}", event.getConnection().getHostname()); Transport transport = event.getTransport(); this.addTransportLayers(event, (TransportInternal) transport); Sasl sasl = transport.sasl(); sasl.setMechanisms("ANONYMOUS"); } @Override @Override public void onConnectionRemoteOpen(Event event) { TRACE_LOGGER.debug("Connection.onConnectionRemoteOpen: hostname:{}, remotecontainer:{}", event.getConnection().getHostname(), event.getConnection().getRemoteContainer()); this.messagingFactory.onConnectionOpen(); } @Override public void onConnectionRemoteClose(Event event) { final Connection connection = event.getConnection(); final ErrorCondition error = connection.getRemoteCondition(); TRACE_LOGGER.debug("onConnectionRemoteClose: hostname:{},errorCondition:{}", connection.getHostname(), error != null ? error.getCondition() + "," + error.getDescription() : null); boolean shouldFreeConnection = connection.getLocalState() == EndpointState.CLOSED; this.messagingFactory.onConnectionError(error); if (shouldFreeConnection) { connection.free(); } } @Override public void onConnectionFinal(Event event) { TRACE_LOGGER.debug("onConnectionFinal: hostname:{}", event.getConnection().getHostname()); } @Override public void onConnectionLocalClose(Event event) { Connection connection = event.getConnection(); TRACE_LOGGER.debug("onConnectionLocalClose: hostname:{}", connection.getHostname()); if (connection.getRemoteState() == EndpointState.CLOSED) { if (connection.getTransport() != null) { connection.getTransport().unbind(); } connection.free(); } } }
class ConnectionHandler extends BaseHandler { private static final SslDomain.VerifyMode VERIFY_MODE; private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ConnectionHandler.class); protected final IAmqpConnection messagingFactory; static { String verifyModePropValue = System.getProperty(ClientConstants.SSL_VERIFY_MODE_PROPERTY_NAME); if (ClientConstants.SSL_VERIFY_MODE_ANONYMOUS.equalsIgnoreCase(verifyModePropValue)) { VERIFY_MODE = SslDomain.VerifyMode.ANONYMOUS_PEER; } else if (ClientConstants.SSL_VERIFY_MODE_CERTONLY.equalsIgnoreCase(verifyModePropValue)) { VERIFY_MODE = SslDomain.VerifyMode.VERIFY_PEER; } else { VERIFY_MODE = SslDomain.VerifyMode.VERIFY_PEER_NAME; } } protected ConnectionHandler(final IAmqpConnection messagingFactory) { add(new Handshaker()); this.messagingFactory = messagingFactory; } public static ConnectionHandler create(TransportType transportType, IAmqpConnection messagingFactory) { switch (transportType) { case AMQP_WEB_SOCKETS: if (ProxyConnectionHandler.shouldUseProxy(messagingFactory.getHostName())) { return new ProxyConnectionHandler(messagingFactory); } else { return new WebSocketConnectionHandler(messagingFactory); } case AMQP: default: return new ConnectionHandler(messagingFactory); } } @Override public void onConnectionInit(Event event) { final Connection connection = event.getConnection(); final String hostName = new StringBuilder(messagingFactory.getHostName()) .append(":") .append(String.valueOf(this.getProtocolPort())) .toString(); TRACE_LOGGER.debug("onConnectionInit: hostname:{}", hostName); connection.setHostname(hostName); connection.setContainer(StringUtil.getShortRandomString()); final Map<Symbol, Object> connectionProperties = new HashMap<Symbol, Object>(); connectionProperties.put(AmqpConstants.PRODUCT, ClientConstants.PRODUCT_NAME); connectionProperties.put(AmqpConstants.VERSION, ClientConstants.CURRENT_JAVACLIENT_VERSION); connectionProperties.put(AmqpConstants.PLATFORM, ClientConstants.PLATFORM_INFO); connection.setProperties(connectionProperties); connection.open(); } protected IAmqpConnection getMessagingFactory() { return this.messagingFactory; } public void addTransportLayers(final Event event, final TransportInternal transport) { SslDomain domain = Proton.sslDomain(); domain.init(SslDomain.Mode.CLIENT); if (VERIFY_MODE == SslDomain.VerifyMode.VERIFY_PEER_NAME) { try { SSLContext defaultContext = SSLContext.getDefault(); StrictTLSContextSpi strictTlsContextSpi = new StrictTLSContextSpi(defaultContext); SSLContext strictTlsContext = new StrictTLSContext(strictTlsContextSpi, defaultContext.getProvider(), defaultContext.getProtocol()); domain.setSslContext(strictTlsContext); domain.setPeerAuthentication(SslDomain.VerifyMode.VERIFY_PEER_NAME); SslPeerDetails peerDetails = Proton.sslPeerDetails(this.messagingFactory.getHostName(), this.getProtocolPort()); transport.ssl(domain, peerDetails); } catch (NoSuchAlgorithmException e) { TRACE_LOGGER.info("Default SSL algorithm not found in JRE. Please check your JRE setup.", e); } } else if (VERIFY_MODE == SslDomain.VerifyMode.VERIFY_PEER) { try { SSLContext defaultContext = SSLContext.getDefault(); domain.setSslContext(defaultContext); domain.setPeerAuthentication(SslDomain.VerifyMode.VERIFY_PEER); transport.ssl(domain); } catch (NoSuchAlgorithmException e) { TRACE_LOGGER.info("Default SSL algorithm not found in JRE. 
Please check your JRE setup.", e); } } else { domain.setPeerAuthentication(SslDomain.VerifyMode.ANONYMOUS_PEER); transport.ssl(domain); } } protected void notifyTransportErrors(final Event event) { /* no-op */ } public String getOutboundSocketHostName() { return messagingFactory.getHostName(); } public int getOutboundSocketPort() { return this.getProtocolPort(); } public int getProtocolPort() { return ClientConstants.AMQPS_PORT; } public int getMaxFrameSize() { return AmqpConstants.MAX_FRAME_SIZE; } @Override public void onConnectionBound(Event event) { TRACE_LOGGER.debug("onConnectionBound: hostname:{}", event.getConnection().getHostname()); Transport transport = event.getTransport(); this.addTransportLayers(event, (TransportInternal) transport); Sasl sasl = transport.sasl(); sasl.setMechanisms("ANONYMOUS"); } @Override @Override public void onConnectionRemoteOpen(Event event) { TRACE_LOGGER.debug("Connection.onConnectionRemoteOpen: hostname:{}, remotecontainer:{}", event.getConnection().getHostname(), event.getConnection().getRemoteContainer()); this.messagingFactory.onConnectionOpen(); } @Override public void onConnectionRemoteClose(Event event) { final Connection connection = event.getConnection(); final ErrorCondition error = connection.getRemoteCondition(); TRACE_LOGGER.debug("onConnectionRemoteClose: hostname:{},errorCondition:{}", connection.getHostname(), error != null ? error.getCondition() + "," + error.getDescription() : null); boolean shouldFreeConnection = connection.getLocalState() == EndpointState.CLOSED; this.messagingFactory.onConnectionError(error); if (shouldFreeConnection) { connection.free(); } } @Override public void onConnectionFinal(Event event) { TRACE_LOGGER.debug("onConnectionFinal: hostname:{}", event.getConnection().getHostname()); } @Override public void onConnectionLocalClose(Event event) { Connection connection = event.getConnection(); TRACE_LOGGER.debug("onConnectionLocalClose: hostname:{}", connection.getHostname()); if (connection.getRemoteState() == EndpointState.CLOSED) { if (connection.getTransport() != null) { connection.getTransport().unbind(); } connection.free(); } } }
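The diff above only drops the nested `if (connection != null)` around `connection.free()`, which sits inside a block already guarded by the same check. A minimal sketch of the resulting control flow, with placeholder types standing in for proton-j's Connection and Transport:

```java
// Placeholder types standing in for proton-j's Connection/Transport; the point here is the
// control flow, not the AMQP API.
interface Transport { void unbind(); }
interface Connection { Transport getTransport(); void free(); }

public class TransportErrorCleanup {
    static void cleanUp(Connection connection) {
        if (connection != null) {              // single null guard is enough
            Transport transport = connection.getTransport();
            if (transport != null) {
                transport.unbind();
            }
            connection.free();                 // the removed inner `if (connection != null)`
                                               // could never be false at this point
        }
    }
}
```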
Would it be simplified to: ``` if (!lease.getOwner().equalsIgnoreCase(serverLease.getOwner())) { ``` If serverLease.getOwner() is null, the comparison simply returns false?
public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Partition {} lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); }
if (serverLease.getOwner() == null) {
public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Partition {} lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { if (leaseContextClient == null) { throw new IllegalArgumentException("leaseContextClient"); } this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { if (leasePrefix == null) { throw new IllegalArgumentException("leasePrefix"); } this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosAsyncContainer leaseCollectionLink) { if (leaseCollectionLink == null) { throw new IllegalArgumentException("leaseCollectionLink"); } this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { if (requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.settings.withHostName(hostName); return this; } @Override public Mono<LeaseStoreManager> build() { if (this.settings == null) { throw new IllegalArgumentException("properties"); } if (this.settings.getContainerNamePrefix() == null) { throw new IllegalArgumentException("properties.containerNamePrefix"); } if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("properties.leaseCollectionLink"); } if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) { throw new IllegalArgumentException("properties.hostName"); } if (this.leaseDocumentClient == null) { throw new IllegalArgumentException("leaseDocumentClient"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new DocumentServiceLeaseStore( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); return Mono.just(this); } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { if (leaseToken == null) { throw new IllegalArgumentException("leaseToken"); } String leaseDocId 
= this.getDocumentId(leaseToken); ServiceItemLease documentServiceLease = new ServiceItemLease() .withId(leaseDocId) .withLeaseToken(leaseToken) .withContinuationToken(continuationToken); return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } InternalObjectNode document = BridgeInternal.getProperties(documentResourceResponse); logger.info("Created lease for partition {}.", leaseToken); return documentServiceLease .withId(document.getId()) .withETag(document.getETag()) .withTs(ModelBridgeInternal.getStringFromJsonSerializable(document, Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient .deleteItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} no need to release lease. 
The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (lease.getOwner() != null && !lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Partition {} lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; })) .doOnError(throwable -> { logger.info("Partition {} lease with token '{}' failed to checkpoint for owner '{}' with continuation token '{}'", lease.getLeaseToken(), lease.getConcurrencyToken(), lease.getOwner(), lease.getContinuationToken()); }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Flux<ServiceItemLease> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.setName("@PartitionLeasePrefix"); param.setValue(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); 
Flux<FeedResponse<InternalObjectNode>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createQueryRequestOptions(), InternalObjectNode.class); return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) .map(ServiceItemLease::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; } }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { if (leaseContextClient == null) { throw new IllegalArgumentException("leaseContextClient"); } this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { if (leasePrefix == null) { throw new IllegalArgumentException("leasePrefix"); } this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosAsyncContainer leaseCollectionLink) { if (leaseCollectionLink == null) { throw new IllegalArgumentException("leaseCollectionLink"); } this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { if (requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.settings.withHostName(hostName); return this; } @Override public Mono<LeaseStoreManager> build() { if (this.settings == null) { throw new IllegalArgumentException("properties"); } if (this.settings.getContainerNamePrefix() == null) { throw new IllegalArgumentException("properties.containerNamePrefix"); } if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("properties.leaseCollectionLink"); } if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) { throw new IllegalArgumentException("properties.hostName"); } if (this.leaseDocumentClient == null) { throw new IllegalArgumentException("leaseDocumentClient"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new DocumentServiceLeaseStore( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); return Mono.just(this); } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { if (leaseToken == null) { throw new IllegalArgumentException("leaseToken"); } String leaseDocId 
= this.getDocumentId(leaseToken); ServiceItemLease documentServiceLease = new ServiceItemLease() .withId(leaseDocId) .withLeaseToken(leaseToken) .withContinuationToken(continuationToken); return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } InternalObjectNode document = BridgeInternal.getProperties(documentResourceResponse); logger.info("Created lease for partition {}.", leaseToken); return documentServiceLease .withId(document.getId()) .withETag(document.getETag()) .withTs(ModelBridgeInternal.getStringFromJsonSerializable(document, Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient .deleteItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} no need to release lease. 
The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (lease.getOwner() != null && !lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Partition {} lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; })) .doOnError(throwable -> { logger.info("Partition {} lease with token '{}' failed to checkpoint for owner '{}' with continuation token '{}'", lease.getLeaseToken(), lease.getConcurrencyToken(), lease.getOwner(), lease.getContinuationToken()); }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Flux<ServiceItemLease> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.setName("@PartitionLeasePrefix"); param.setValue(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); Flux<FeedResponse<InternalObjectNode>> query = 
this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createQueryRequestOptions(), InternalObjectNode.class); return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) .map(ServiceItemLease::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; } }
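The suggestion in the comment hinges on how String.equalsIgnoreCase treats null: a null argument yields false, while a null receiver throws NullPointerException. That is why the explicit `serverLease.getOwner() == null` branch has to come first, and why the extra `!= null` in the else-if is redundant once it does. A small runnable demonstration (plain JDK, no SDK types):

```java
public class EqualsIgnoreCaseNullDemo {
    public static void main(String[] args) {
        String leaseOwner = "host-a";
        String serverOwner = null;

        // Non-null receiver, null argument: returns false, no exception.
        System.out.println(leaseOwner.equalsIgnoreCase(serverOwner)); // false

        // A null receiver would throw NullPointerException, so the explicit
        // `serverLease.getOwner() == null` branch must run first:
        if (serverOwner == null) {
            System.out.println("lease was taken over and released");
        } else if (!serverOwner.equalsIgnoreCase(leaseOwner)) {
            // the extra `serverOwner != null &&` removed in the diff is redundant here,
            // because the preceding branch already handled null
            System.out.println("lease was taken over by " + serverOwner);
        }
    }
}
```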
Is there any test coverage that would need to be added for this scenario?
public Mono<Lease> checkpoint(Lease lease, String continuationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Partition {} lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; })) .doOnError(throwable -> { logger.info("Partition {} lease with token '{}' failed to checkpoint for owner '{}' with continuation token '{}'", lease.getLeaseToken(), lease.getConcurrencyToken(), lease.getOwner(), lease.getContinuationToken()); }); }
refreshedLease,
public Mono<Lease> checkpoint(Lease lease, String continuationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Partition {} lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; })) .doOnError(throwable -> { logger.info("Partition {} lease with token '{}' failed to checkpoint for owner '{}' with continuation token '{}'", lease.getLeaseToken(), lease.getConcurrencyToken(), lease.getOwner(), lease.getContinuationToken()); }); }
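Regarding the test-coverage question, the scenarios worth pinning down are: server owner is null (lease released), server owner differs (lease stolen), and same owner ignoring case (checkpoint proceeds). The sketch below enumerates them against a hypothetical helper that mirrors the owner check inside the updateLease lambda; it is framework-free and not the SDK's test suite.

```java
// Hypothetical, framework-free sketch of the cases a checkpoint test would cover;
// `validateOwner` stands in for the lambda passed to leaseUpdater.updateLease above,
// and IllegalStateException stands in for the SDK's LeaseLostException.
public class CheckpointOwnerCheckSketch {
    static void validateOwner(String serverOwner, String leaseOwner) {
        if (serverOwner == null) {
            throw new IllegalStateException("lease was taken over and released");
        } else if (!serverOwner.equalsIgnoreCase(leaseOwner)) {
            throw new IllegalStateException("lease was taken over by " + serverOwner);
        }
    }

    public static void main(String[] args) {
        expectThrows(() -> validateOwner(null, "host-a"));      // released by another owner
        expectThrows(() -> validateOwner("host-b", "host-a"));  // stolen by another owner
        validateOwner("HOST-A", "host-a");                      // same owner, case-insensitive: ok
        System.out.println("all checkpoint owner cases behave as expected");
    }

    private static void expectThrows(Runnable r) {
        try { r.run(); } catch (IllegalStateException expected) { return; }
        throw new AssertionError("expected an exception");
    }
}
```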
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { if (leaseContextClient == null) { throw new IllegalArgumentException("leaseContextClient"); } this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { if (leasePrefix == null) { throw new IllegalArgumentException("leasePrefix"); } this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosAsyncContainer leaseCollectionLink) { if (leaseCollectionLink == null) { throw new IllegalArgumentException("leaseCollectionLink"); } this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { if (requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.settings.withHostName(hostName); return this; } @Override public Mono<LeaseStoreManager> build() { if (this.settings == null) { throw new IllegalArgumentException("properties"); } if (this.settings.getContainerNamePrefix() == null) { throw new IllegalArgumentException("properties.containerNamePrefix"); } if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("properties.leaseCollectionLink"); } if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) { throw new IllegalArgumentException("properties.hostName"); } if (this.leaseDocumentClient == null) { throw new IllegalArgumentException("leaseDocumentClient"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new DocumentServiceLeaseStore( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); return Mono.just(this); } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { if (leaseToken == null) { throw new IllegalArgumentException("leaseToken"); } String leaseDocId 
= this.getDocumentId(leaseToken); ServiceItemLease documentServiceLease = new ServiceItemLease() .withId(leaseDocId) .withLeaseToken(leaseToken) .withContinuationToken(continuationToken); return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } InternalObjectNode document = BridgeInternal.getProperties(documentResourceResponse); logger.info("Created lease for partition {}.", leaseToken); return documentServiceLease .withId(document.getId()) .withETag(document.getETag()) .withTs(ModelBridgeInternal.getStringFromJsonSerializable(document, Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient .deleteItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} no need to release lease. 
The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Partition {} lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (lease.getOwner() != null && !lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Flux<ServiceItemLease> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.setName("@PartitionLeasePrefix"); param.setValue(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); Flux<FeedResponse<InternalObjectNode>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, 
this.requestOptionsFactory.createQueryRequestOptions(), InternalObjectNode.class); return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) .map(ServiceItemLease::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; } }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { if (leaseContextClient == null) { throw new IllegalArgumentException("leaseContextClient"); } this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { if (leasePrefix == null) { throw new IllegalArgumentException("leasePrefix"); } this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosAsyncContainer leaseCollectionLink) { if (leaseCollectionLink == null) { throw new IllegalArgumentException("leaseCollectionLink"); } this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { if (requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.settings.withHostName(hostName); return this; } @Override public Mono<LeaseStoreManager> build() { if (this.settings == null) { throw new IllegalArgumentException("properties"); } if (this.settings.getContainerNamePrefix() == null) { throw new IllegalArgumentException("properties.containerNamePrefix"); } if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("properties.leaseCollectionLink"); } if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) { throw new IllegalArgumentException("properties.hostName"); } if (this.leaseDocumentClient == null) { throw new IllegalArgumentException("leaseDocumentClient"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new DocumentServiceLeaseStore( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); return Mono.just(this); } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { if (leaseToken == null) { throw new IllegalArgumentException("leaseToken"); } String leaseDocId 
= this.getDocumentId(leaseToken); ServiceItemLease documentServiceLease = new ServiceItemLease() .withId(leaseDocId) .withLeaseToken(leaseToken) .withContinuationToken(continuationToken); return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } InternalObjectNode document = BridgeInternal.getProperties(documentResourceResponse); logger.info("Created lease for partition {}.", leaseToken); return documentServiceLease .withId(document.getId()) .withETag(document.getETag()) .withTs(ModelBridgeInternal.getStringFromJsonSerializable(document, Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient .deleteItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} no need to release lease. 
The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (e.getStatusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Partition {} lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (lease.getOwner() != null && !lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Flux<ServiceItemLease> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.setName("@PartitionLeasePrefix"); param.setValue(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); Flux<FeedResponse<InternalObjectNode>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createQueryRequestOptions(), 
InternalObjectNode.class); return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) .map(ServiceItemLease::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; } }
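The createLeaseIfNotExist bodies above both rely on the same pattern: an HTTP 409 Conflict from the create call means another host already created the lease, so the reactive chain completes empty instead of erroring. Below is a minimal sketch of that pattern, assuming only reactor-core on the classpath; StatusCodeException is a hypothetical stand-in for the CosmosException/ChangeFeedHelper status check used in the code above, not an SDK type.

import reactor.core.publisher.Mono;

public final class ConflictTolerantCreate {

    /** Hypothetical status-carrying exception used for illustration only. */
    static final class StatusCodeException extends RuntimeException {
        private final int statusCode;
        StatusCodeException(int statusCode) { this.statusCode = statusCode; }
        int getStatusCode() { return statusCode; }
    }

    /** Treats HTTP 409 as "already created by some other host" and swallows it. */
    static <T> Mono<T> createIgnoringConflict(Mono<T> createCall) {
        return createCall.onErrorResume(ex -> {
            if (ex instanceof StatusCodeException
                    && ((StatusCodeException) ex).getStatusCode() == 409) {
                return Mono.empty(); // another host won the race; nothing left to do
            }
            return Mono.error(ex);   // propagate every other failure unchanged
        });
    }

    public static void main(String[] args) {
        Mono<String> failing = Mono.error(new StatusCodeException(409));
        // Completes empty instead of failing, so nothing is printed:
        createIgnoringConflict(failing).blockOptional().ifPresent(System.out::println);
    }
}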
Remove these tests because they no longer work (possibly caused by a security policy).
public void canCreateVirtualMachineScaleSetWithCustomScriptExtension() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .attach() .withUpgradeMode(UpgradeMode.MANUAL) .create(); checkVMInstances(virtualMachineScaleSet); List<String> publicIPAddressIds = virtualMachineScaleSet.primaryPublicIpAddressIds(); PublicIpAddress publicIPAddress = this.networkManager.publicIpAddresses().getById(publicIPAddressIds.get(0)); String fqdn = publicIPAddress.fqdn(); Assertions.assertNotNull(fqdn); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { PagedIterable<VirtualMachineScaleSetNetworkInterface> networkInterfaces = vm.listNetworkInterfaces(); Assertions.assertEquals(TestUtilities.getSize(networkInterfaces), 1); VirtualMachineScaleSetNetworkInterface networkInterface = networkInterfaces.iterator().next(); VirtualMachineScaleSetNicIpConfiguration primaryIpConfig = null; primaryIpConfig = networkInterface.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Integer sshFrontendPort = null; List<LoadBalancerInboundNatRule> natRules = primaryIpConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule natRule : natRules) { if (natRule.backendPort() == 22) { sshFrontendPort = natRule.frontendPort(); break; } } Assertions.assertNotNull(sshFrontendPort); } }
public void canCreateVirtualMachineScaleSetWithCustomScriptExtension() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .attach() .withUpgradeMode(UpgradeMode.MANUAL) .create(); checkVMInstances(virtualMachineScaleSet); List<String> publicIPAddressIds = virtualMachineScaleSet.primaryPublicIpAddressIds(); PublicIpAddress publicIPAddress = this.networkManager.publicIpAddresses().getById(publicIPAddressIds.get(0)); String fqdn = publicIPAddress.fqdn(); Assertions.assertNotNull(fqdn); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { PagedIterable<VirtualMachineScaleSetNetworkInterface> networkInterfaces = vm.listNetworkInterfaces(); Assertions.assertEquals(TestUtilities.getSize(networkInterfaces), 1); VirtualMachineScaleSetNetworkInterface networkInterface = networkInterfaces.iterator().next(); VirtualMachineScaleSetNicIpConfiguration primaryIpConfig = null; primaryIpConfig = networkInterface.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Integer sshFrontendPort = null; List<LoadBalancerInboundNatRule> natRules = primaryIpConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule natRule : natRules) { if (natRule.backendPort() == 22) { sshFrontendPort = natRule.frontendPort(); break; } } Assertions.assertNotNull(sshFrontendPort); } }
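The comment above asks for the test to be removed because the environment (possibly a security policy) broke it. A minimal sketch of an alternative, not the repository's actual change: JUnit 5 lets the test stay in the tree but be skipped with a visible reason, mirroring the @Disabled usage that already appears elsewhere in this test class. The class and reason string here are placeholders.

import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

class EnvironmentDependentTests {

    @Test
    @Disabled("No longer works in CI, possibly blocked by a security policy")
    void canCreateVirtualMachineScaleSetWithCustomScriptExtension() {
        // Original test body would go here; it is skipped, not deleted.
    }
}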
class VirtualMachineScaleSetOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_WEST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVMSSWithPlan() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); final String uname = "jvuser"; Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); PurchasePlan plan = new PurchasePlan() .withName("access_server_byol") .withPublisher("openvpn") .withProduct("openvpnas"); ImageReference imageReference = new ImageReference() .withPublisher("openvpn") .withOffer("openvpnas") .withSku("access_server_byol") .withVersion("latest"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withSpecificLinuxImageVersion(imageReference) .withRootUsername(uname) .withSsh(sshPublicKey()) .withNewDataDisk(1) .withPlan(plan) .create(); VirtualMachineScaleSet currentVirtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertEquals("access_server_byol", currentVirtualMachineScaleSet.plan().name()); Assertions.assertEquals("openvpn", currentVirtualMachineScaleSet.plan().publisher()); Assertions.assertEquals("openvpnas", currentVirtualMachineScaleSet.plan().product()); } @Test public void canUpdateVirtualMachineScaleSetWithExtensionProtectedSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("stg", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); List<StorageAccountKey> keys = storageAccount.getKeys(); Assertions.assertNotNull(keys); Assertions.assertTrue(keys.size() > 0); String storageAccountKey = keys.get(0).value(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withExistingStorageAccount(storageAccount) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .withProtectedSetting("storageAccountName", storageAccount.name()) .withProtectedSetting("storageAccountKey", storageAccountKey) .attach() .create(); Map<String, VirtualMachineScaleSetExtension> extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); VirtualMachineScaleSetExtension extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); VirtualMachineScaleSet scaleSet = this.computeManager.virtualMachineScaleSets().getById(virtualMachineScaleSet.id()); extensions = scaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); int newCapacity = scaleSet.capacity() + 1; virtualMachineScaleSet.update().withCapacity(newCapacity).apply(); extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); } @Test @Test public void canCreateVirtualMachineScaleSetWithOptionalNetworkSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); final String nsgName = generateRandomResourceName("nsg", 10); final String asgName = generateRandomResourceName("asg", 8); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ApplicationSecurityGroup asg = this .networkManager .applicationSecurityGroups() .define(asgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) 
.withRootUsername("jvuser") .withSsh(sshPublicKey()) .withVirtualMachinePublicIp(vmssVmDnsLabel) .withExistingApplicationSecurityGroup(asg) .create(); VirtualMachineScaleSetPublicIpAddressConfiguration currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.dnsSettings()); Assertions.assertNotNull(currentIpConfig.dnsSettings().domainNameLabel()); currentIpConfig.withIdleTimeoutInMinutes(20); virtualMachineScaleSet.update().withVirtualMachinePublicIp(currentIpConfig).apply(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); virtualMachineScaleSet.refresh(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); List<String> asgIds = virtualMachineScaleSet.applicationSecurityGroupIds(); Assertions.assertNotNull(asgIds); Assertions.assertEquals(1, asgIds.size()); NetworkSecurityGroup nsg = networkManager .networkSecurityGroups() .define(nsgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineRule("rule1") .allowOutbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); virtualMachineScaleSet.deallocate(); virtualMachineScaleSet .update() .withIpForwarding() .withAcceleratedNetworking() .withExistingNetworkSecurityGroup(nsg) .apply(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet.refresh(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet .update() .withoutIpForwarding() .withoutAcceleratedNetworking() .withoutNetworkSecurityGroup() .apply(); Assertions.assertFalse(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertFalse(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNull(virtualMachineScaleSet.networkSecurityGroupId()); } @Test @Disabled("Mock framework doesn't support data plane") public void canCreateVirtualMachineScaleSetWithSecret() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vaultName = generateRandomResourceName("vlt", 10); final String secretName = generateRandomResourceName("srt", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } 
Assertions.assertTrue(backends.size() == 2); Vault vault = this .keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowSecretAllPermissions() .attach() .withDeploymentEnabled() .create(); final InputStream embeddedJsonConfig = VirtualMachineExtensionOperationsTests.class.getResourceAsStream("/myTest.txt"); String secretValue = IOUtils.toString(embeddedJsonConfig, StandardCharsets.UTF_8); Secret secret = vault.secrets().define(secretName).withValue(secretValue).create(); List<VaultCertificate> certs = new ArrayList<>(); certs.add(new VaultCertificate().withCertificateUrl(secret.id())); List<VaultSecretGroup> group = new ArrayList<>(); group .add( new VaultSecretGroup() .withSourceVault(new SubResource().withId(vault.id())) .withVaultCertificates(certs)); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSecrets(group) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() > 0); } virtualMachineScaleSet.update().withoutSecrets().apply(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() == 0); } } public void canCreateVirtualMachineScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .create(); 
Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = virtualMachineScaleSet.listNetworkInterfaces(); int nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Assertions.assertNotNull(nic.id()); Assertions .assertTrue(nic.virtualMachineId().toLowerCase().startsWith(virtualMachineScaleSet.id().toLowerCase())); Assertions.assertNotNull(nic.macAddress()); Assertions.assertNotNull(nic.dnsServers()); Assertions.assertNotNull(nic.appliedDnsServers()); Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); Assertions.assertTrue(ipConfig.isPrimary()); Assertions.assertNotNull(ipConfig.subnetName()); Assertions.assertTrue(primaryNetwork.id().toLowerCase().equalsIgnoreCase(ipConfig.networkId())); Assertions.assertNotNull(ipConfig.privateIpAddress()); Assertions.assertNotNull(ipConfig.privateIpAddressVersion()); Assertions.assertNotNull(ipConfig.privateIpAllocationMethod()); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertEquals(lbBackends.size(), 2); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); Assertions.assertEquals(lbNatRules.size(), 2); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099)); Assertions.assertTrue(lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 23); } } } Assertions.assertTrue(nicCount > 0); Assertions.assertEquals(virtualMachineScaleSet.vhdContainers().size(), 2); Assertions.assertEquals(virtualMachineScaleSet.sku(), VirtualMachineScaleSetSkuTypes.STANDARD_A0); Assertions.assertTrue(virtualMachineScaleSet.upgradeModel() == UpgradeMode.AUTOMATIC); Assertions.assertEquals(virtualMachineScaleSet.capacity(), 2); primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); String inboundNatPoolToRemove = null; for (String inboundNatPoolName : 
virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().keySet()) { inboundNatPoolToRemove = inboundNatPoolName; break; } LoadBalancer internalLoadBalancer = createInternalLoadBalancer(region, resourceGroup, primaryNetwork, "1"); virtualMachineScaleSet .update() .withExistingPrimaryInternalLoadBalancer(internalLoadBalancer) .withoutPrimaryInternetFacingLoadBalancerNatPools(inboundNatPoolToRemove) .apply(); virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 1); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 2); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 2); nics = virtualMachineScaleSet.listNetworkInterfaces(); nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertNotNull(lbBackends); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443) || (rule.frontendPort() == 1000 && rule.backendPort() == 1000) || (rule.frontendPort() == 1001 && rule.backendPort() == 1001)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099) || (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 8000 && lbNatRule.frontendPort() <= 8099) || (lbNatRule.frontendPort() >= 9000 && lbNatRule.frontendPort() <= 9099)); Assertions .assertTrue( lbNatRule.backendPort() == 23 || lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 44 || lbNatRule.backendPort() == 45); } } } Assertions.assertTrue(nicCount > 0); } /* * Previously name * canCreateTwoRegionalVirtualMachineScaleSetsAndAssociateEachWithDifferentBackendPoolOfZoneResilientLoadBalancer * but this was too long for some OSes and would cause git checkout to fail. 
*/ @Test public void canCreateTwoRegionalVMScaleSetsWithDifferentPoolOfZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); List<String> natpools = new ArrayList<>(); for (String natPool : publicLoadBalancer.inboundNatPools().keySet()) { natpools.add(natPool); } Assertions.assertTrue(natpools.size() == 2); final String vmssName1 = generateRandomResourceName("vmss1", 10); VirtualMachineScaleSet virtualMachineScaleSet1 = this .computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(0)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); final String vmssName2 = generateRandomResourceName("vmss2", 10); VirtualMachineScaleSet virtualMachineScaleSet2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(1)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(virtualMachineScaleSet1.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet1.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); Assertions.assertNull(virtualMachineScaleSet2.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet2.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); } @Test public void canCreateZoneRedundantVirtualMachineScaleSetWithZoneResilientLoadBalancer() throws Exception { Region region2 = 
Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); final String vmssName = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withAvailabilityZone(AvailabilityZoneId.ZONE_1) .withAvailabilityZone(AvailabilityZoneId.ZONE_2) .create(); Assertions.assertNotNull(virtualMachineScaleSet.availabilityZones()); Assertions.assertEquals(2, virtualMachineScaleSet.availabilityZones().size()); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithoutRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse( found, "Resource group should not have a role assignment with virtual machine scale set MSI principal"); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithMultipleRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("jvcsrg", 10)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachineScaleSet.managedServiceIdentityType()); Assertions .assertTrue( virtualMachineScaleSet.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Resource group should have a role assignment with virtual machine scale set MSI principal"); PagedIterable<RoleAssignment> stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment 
roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Storage account should have a role assignment with virtual machine scale set MSI principal"); } @Test public void canGetSingleVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = virtualMachineScaleSet.virtualMachines(); VirtualMachineScaleSetVM firstVm = virtualMachineScaleSetVMs.list().iterator().next(); VirtualMachineScaleSetVM fetchedVm = virtualMachineScaleSetVMs.getInstance(firstVm.instanceId()); this.checkVmsEqual(firstVm, fetchedVm); VirtualMachineScaleSetVM fetchedAsyncVm = virtualMachineScaleSetVMs.getInstanceAsync(firstVm.instanceId()).block(); this.checkVmsEqual(firstVm, fetchedAsyncVm); } @Test public void canCreateLowPriorityVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") 
.withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(-1.0) .create(); Assertions.assertEquals(vmss.virtualMachinePriority(), VirtualMachinePriorityTypes.LOW); Assertions.assertEquals(vmss.virtualMachineEvictionPolicy(), VirtualMachineEvictionPolicyTypes.DEALLOCATE); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) (-1.0)); vmss.update().withMaxPrice(2000.0).apply(); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) 2000.0); } @Test public void canPerformSimulateEvictionOnSpotVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups() .define(rgName) .withRegion(region) .create(); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .create(); PagedIterable<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(); for (VirtualMachineScaleSetVM instance: vmInstances) { Assertions.assertTrue(instance.osDiskSizeInGB() > 0); vmss.virtualMachines().simulateEviction(instance.instanceId()); } boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); deallocated = true; for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); if (instance.powerState() != PowerState.DEALLOCATED) { deallocated = false; } } if (deallocated) { break; } } Assertions.assertTrue(deallocated); for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); Assertions.assertEquals(0, instance.osDiskSizeInGB()); } } private void checkVmsEqual(VirtualMachineScaleSetVM original, VirtualMachineScaleSetVM fetched) { Assertions.assertEquals(original.administratorUserName(), fetched.administratorUserName()); Assertions.assertEquals(original.availabilitySetId(), fetched.availabilitySetId()); Assertions.assertEquals(original.bootDiagnosticEnabled(), fetched.bootDiagnosticEnabled()); Assertions.assertEquals(original.bootDiagnosticStorageAccountUri(), fetched.bootDiagnosticStorageAccountUri()); Assertions.assertEquals(original.computerName(), fetched.computerName()); Assertions.assertEquals(original.dataDisks().size(), fetched.dataDisks().size()); Assertions.assertEquals(original.extensions().size(), fetched.extensions().size()); 
Assertions.assertEquals(original.instanceId(), fetched.instanceId()); Assertions.assertEquals(original.isLatestScaleSetUpdateApplied(), fetched.isLatestScaleSetUpdateApplied()); Assertions.assertEquals(original.isLinuxPasswordAuthenticationEnabled(), fetched.isLinuxPasswordAuthenticationEnabled()); Assertions.assertEquals(original.isManagedDiskEnabled(), fetched.isManagedDiskEnabled()); Assertions.assertEquals(original.isOSBasedOnCustomImage(), fetched.isOSBasedOnCustomImage()); Assertions.assertEquals(original.isOSBasedOnPlatformImage(), fetched.isOSBasedOnPlatformImage()); Assertions.assertEquals(original.isOSBasedOnStoredImage(), fetched.isOSBasedOnStoredImage()); Assertions.assertEquals(original.isWindowsAutoUpdateEnabled(), fetched.isWindowsAutoUpdateEnabled()); Assertions.assertEquals(original.isWindowsVMAgentProvisioned(), original.isWindowsVMAgentProvisioned()); Assertions.assertEquals(original.networkInterfaceIds().size(), fetched.networkInterfaceIds().size()); Assertions.assertEquals(original.osDiskCachingType(), fetched.osDiskCachingType()); Assertions.assertEquals(original.osDiskId(), fetched.osDiskId()); Assertions.assertEquals(original.osDiskName(), fetched.osDiskName()); Assertions.assertEquals(original.osDiskSizeInGB(), fetched.osDiskSizeInGB()); Assertions.assertEquals(original.osType(), fetched.osType()); Assertions.assertEquals(original.osUnmanagedDiskVhdUri(), fetched.osUnmanagedDiskVhdUri()); Assertions.assertEquals(original.powerState(), fetched.powerState()); Assertions.assertEquals(original.primaryNetworkInterfaceId(), fetched.primaryNetworkInterfaceId()); Assertions.assertEquals(original.size(), fetched.size()); Assertions.assertEquals(original.sku().name(), fetched.sku().name()); Assertions.assertEquals(original.storedImageUnmanagedVhdUri(), fetched.storedImageUnmanagedVhdUri()); Assertions.assertEquals(original.unmanagedDataDisks().size(), fetched.unmanagedDataDisks().size()); Assertions.assertEquals(original.windowsTimeZone(), fetched.windowsTimeZone()); } private void checkVMInstances(VirtualMachineScaleSet vmScaleSet) { VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = vmScaleSet.virtualMachines(); PagedIterable<VirtualMachineScaleSetVM> virtualMachines = virtualMachineScaleSetVMs.list(); Assertions.assertEquals(TestUtilities.getSize(virtualMachines), vmScaleSet.capacity()); Assertions.assertTrue(TestUtilities.getSize(virtualMachines) > 0); virtualMachineScaleSetVMs.updateInstances(virtualMachines.iterator().next().instanceId()); for (VirtualMachineScaleSetVM vm : virtualMachines) { Assertions.assertNotNull(vm.size()); Assertions.assertEquals(vm.osType(), OperatingSystemTypes.LINUX); Assertions.assertNotNull(vm.computerName().startsWith(vmScaleSet.computerNamePrefix())); Assertions.assertTrue(vm.isOSBasedOnPlatformImage()); Assertions.assertNull(vm.osDiskId()); Assertions.assertNotNull(vm.osUnmanagedDiskVhdUri()); Assertions.assertNull(vm.storedImageUnmanagedVhdUri()); Assertions.assertFalse(vm.isWindowsAutoUpdateEnabled()); Assertions.assertFalse(vm.isWindowsVMAgentProvisioned()); Assertions.assertTrue(vm.administratorUserName().equalsIgnoreCase("jvuser")); VirtualMachineImage vmImage = vm.getOSPlatformImage(); Assertions.assertNotNull(vmImage); Assertions.assertEquals(vm.extensions().size(), vmScaleSet.extensions().size()); Assertions.assertNotNull(vm.powerState()); vm.refreshInstanceView(); } VirtualMachineScaleSetVM virtualMachineScaleSetVM = virtualMachines.iterator().next(); Assertions.assertNotNull(virtualMachineScaleSetVM); 
virtualMachineScaleSetVM.restart(); virtualMachineScaleSetVM.powerOff(); virtualMachineScaleSetVM.refreshInstanceView(); Assertions.assertEquals(virtualMachineScaleSetVM.powerState(), PowerState.STOPPED); virtualMachineScaleSetVM.start(); for (VirtualMachineScaleSetVM vm : virtualMachines) { PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = vmScaleSet.listNetworkInterfacesByInstanceId(vm.instanceId()); Assertions.assertNotNull(nics); Assertions.assertEquals(TestUtilities.getSize(nics), 1); VirtualMachineScaleSetNetworkInterface nic = nics.iterator().next(); Assertions.assertNotNull(nic.virtualMachineId()); Assertions.assertTrue(nic.virtualMachineId().toLowerCase().equalsIgnoreCase(vm.id())); Assertions.assertNotNull(vm.listNetworkInterfaces()); VirtualMachineScaleSetNetworkInterface nicA = vmScaleSet.getNetworkInterfaceByInstanceId(vm.instanceId(), nic.name()); Assertions.assertNotNull(nicA); VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name()); String nicIdB = vm.getNetworkInterfaceAsync(nic.name()).map(n -> nic.primaryIPConfiguration().networkId()).block(); Assertions.assertNotNull(nicB); Assertions.assertNotNull(nicIdB); } } @Test public void testVirtualMachineScaleSetSkuTypes() { rgName = null; VirtualMachineScaleSetSkuTypes skuType = VirtualMachineScaleSetSkuTypes.STANDARD_A0; Assertions.assertNull(skuType.sku().capacity()); Sku sku1 = skuType.sku(); Assertions.assertNull(sku1.capacity()); sku1.withCapacity(1L); Assertions.assertEquals(sku1.capacity().longValue(), 1); Assertions.assertNull(skuType.sku().capacity()); Sku sku2 = skuType.sku(); Assertions.assertNull(sku2.capacity()); sku2.withCapacity(2L); Assertions.assertEquals(sku2.capacity().longValue(), 2); Assertions.assertNull(skuType.sku().capacity()); Assertions.assertEquals(sku1.capacity().longValue(), 1); } @Test public void canDeleteVMSSInstance() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(4) .create(); Assertions.assertEquals(4, vmss.virtualMachines().list().stream().count()); List<String> firstTwoIds = vmss.virtualMachines().list().stream() .limit(2) .map(VirtualMachineScaleSetVM::instanceId) .collect(Collectors.toList()); vmss.virtualMachines().deleteInstances(firstTwoIds, true); Assertions.assertEquals(2, vmss.virtualMachines().list().stream().count()); vmss.virtualMachines().deleteInstances(Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); Assertions.assertEquals(1, vmss.virtualMachines().list().stream().count()); computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, 
Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); } }
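For readers of the low-priority and Spot tests in the block above, a minimal hedged sketch of the billing-cap flow they exercise, written in the same fluent style. It reuses the tests' computeManager, region, network, rgName, vmssName and sshPublicKey() helpers, and the reading of -1.0 as "no explicit price cap" is an assumption, not something the tests assert.

// Sketch under assumptions: identifiers and fluent-call ordering are borrowed from the tests above.
VirtualMachineScaleSet lowPriorityVmss = computeManager.virtualMachineScaleSets()
    .define(vmssName)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2)
    .withExistingPrimaryNetworkSubnet(network, "subnet1")
    .withoutPrimaryInternetFacingLoadBalancer()
    .withoutPrimaryInternalLoadBalancer()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
    .withRootUsername("jvuser")
    .withSsh(sshPublicKey())
    .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
    .withMaxPrice(-1.0)   // assumed semantics: evict on capacity only, no explicit price cap
    .create();

// The cap can be changed in place; the test asserts the new value round-trips through billingProfile().
lowPriorityVmss.update().withMaxPrice(2000.0).apply();
Assertions.assertEquals((Double) 2000.0, lowPriorityVmss.billingProfile().maxPrice());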
class VirtualMachineScaleSetOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_WEST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVMSSWithPlan() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); final String uname = "jvuser"; Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); PurchasePlan plan = new PurchasePlan() .withName("access_server_byol") .withPublisher("openvpn") .withProduct("openvpnas"); ImageReference imageReference = new ImageReference() .withPublisher("openvpn") .withOffer("openvpnas") .withSku("access_server_byol") .withVersion("latest"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withSpecificLinuxImageVersion(imageReference) .withRootUsername(uname) .withSsh(sshPublicKey()) .withNewDataDisk(1) .withPlan(plan) .create(); VirtualMachineScaleSet currentVirtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertEquals("access_server_byol", currentVirtualMachineScaleSet.plan().name()); Assertions.assertEquals("openvpn", currentVirtualMachineScaleSet.plan().publisher()); Assertions.assertEquals("openvpnas", currentVirtualMachineScaleSet.plan().product()); } @Test public void canUpdateVirtualMachineScaleSetWithExtensionProtectedSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("stg", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); List<StorageAccountKey> keys = storageAccount.getKeys(); Assertions.assertNotNull(keys); Assertions.assertTrue(keys.size() > 0); String storageAccountKey = keys.get(0).value(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withExistingStorageAccount(storageAccount) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .withProtectedSetting("storageAccountName", storageAccount.name()) .withProtectedSetting("storageAccountKey", storageAccountKey) .attach() .create(); Map<String, VirtualMachineScaleSetExtension> extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); VirtualMachineScaleSetExtension extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); VirtualMachineScaleSet scaleSet = this.computeManager.virtualMachineScaleSets().getById(virtualMachineScaleSet.id()); extensions = scaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); int newCapacity = scaleSet.capacity() + 1; virtualMachineScaleSet.update().withCapacity(newCapacity).apply(); extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); } @Test public void canCreateVirtualMachineScaleSetWithOptionalNetworkSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); final String nsgName = generateRandomResourceName("nsg", 10); final String asgName = generateRandomResourceName("asg", 8); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ApplicationSecurityGroup asg = this .networkManager .applicationSecurityGroups() .define(asgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) 
.withRootUsername("jvuser") .withSsh(sshPublicKey()) .withVirtualMachinePublicIp(vmssVmDnsLabel) .withExistingApplicationSecurityGroup(asg) .create(); VirtualMachineScaleSetPublicIpAddressConfiguration currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.dnsSettings()); Assertions.assertNotNull(currentIpConfig.dnsSettings().domainNameLabel()); currentIpConfig.withIdleTimeoutInMinutes(20); virtualMachineScaleSet.update().withVirtualMachinePublicIp(currentIpConfig).apply(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); virtualMachineScaleSet.refresh(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); List<String> asgIds = virtualMachineScaleSet.applicationSecurityGroupIds(); Assertions.assertNotNull(asgIds); Assertions.assertEquals(1, asgIds.size()); NetworkSecurityGroup nsg = networkManager .networkSecurityGroups() .define(nsgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineRule("rule1") .allowOutbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); virtualMachineScaleSet.deallocate(); virtualMachineScaleSet .update() .withIpForwarding() .withAcceleratedNetworking() .withExistingNetworkSecurityGroup(nsg) .apply(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet.refresh(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet .update() .withoutIpForwarding() .withoutAcceleratedNetworking() .withoutNetworkSecurityGroup() .apply(); Assertions.assertFalse(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertFalse(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNull(virtualMachineScaleSet.networkSecurityGroupId()); } @Test @Disabled("Mock framework doesn't support data plane") public void canCreateVirtualMachineScaleSetWithSecret() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vaultName = generateRandomResourceName("vlt", 10); final String secretName = generateRandomResourceName("srt", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } 
Assertions.assertTrue(backends.size() == 2); Vault vault = this .keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowSecretAllPermissions() .attach() .withDeploymentEnabled() .create(); final InputStream embeddedJsonConfig = VirtualMachineExtensionOperationsTests.class.getResourceAsStream("/myTest.txt"); String secretValue = IOUtils.toString(embeddedJsonConfig, StandardCharsets.UTF_8); Secret secret = vault.secrets().define(secretName).withValue(secretValue).create(); List<VaultCertificate> certs = new ArrayList<>(); certs.add(new VaultCertificate().withCertificateUrl(secret.id())); List<VaultSecretGroup> group = new ArrayList<>(); group .add( new VaultSecretGroup() .withSourceVault(new SubResource().withId(vault.id())) .withVaultCertificates(certs)); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSecrets(group) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() > 0); } virtualMachineScaleSet.update().withoutSecrets().apply(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() == 0); } } public void canCreateVirtualMachineScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .create(); 
Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = virtualMachineScaleSet.listNetworkInterfaces(); int nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Assertions.assertNotNull(nic.id()); Assertions .assertTrue(nic.virtualMachineId().toLowerCase().startsWith(virtualMachineScaleSet.id().toLowerCase())); Assertions.assertNotNull(nic.macAddress()); Assertions.assertNotNull(nic.dnsServers()); Assertions.assertNotNull(nic.appliedDnsServers()); Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); Assertions.assertTrue(ipConfig.isPrimary()); Assertions.assertNotNull(ipConfig.subnetName()); Assertions.assertTrue(primaryNetwork.id().toLowerCase().equalsIgnoreCase(ipConfig.networkId())); Assertions.assertNotNull(ipConfig.privateIpAddress()); Assertions.assertNotNull(ipConfig.privateIpAddressVersion()); Assertions.assertNotNull(ipConfig.privateIpAllocationMethod()); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertEquals(lbBackends.size(), 2); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); Assertions.assertEquals(lbNatRules.size(), 2); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099)); Assertions.assertTrue(lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 23); } } } Assertions.assertTrue(nicCount > 0); Assertions.assertEquals(virtualMachineScaleSet.vhdContainers().size(), 2); Assertions.assertEquals(virtualMachineScaleSet.sku(), VirtualMachineScaleSetSkuTypes.STANDARD_A0); Assertions.assertTrue(virtualMachineScaleSet.upgradeModel() == UpgradeMode.AUTOMATIC); Assertions.assertEquals(virtualMachineScaleSet.capacity(), 2); primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); String inboundNatPoolToRemove = null; for (String inboundNatPoolName : 
virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().keySet()) { inboundNatPoolToRemove = inboundNatPoolName; break; } LoadBalancer internalLoadBalancer = createInternalLoadBalancer(region, resourceGroup, primaryNetwork, "1"); virtualMachineScaleSet .update() .withExistingPrimaryInternalLoadBalancer(internalLoadBalancer) .withoutPrimaryInternetFacingLoadBalancerNatPools(inboundNatPoolToRemove) .apply(); virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 1); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 2); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 2); nics = virtualMachineScaleSet.listNetworkInterfaces(); nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertNotNull(lbBackends); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443) || (rule.frontendPort() == 1000 && rule.backendPort() == 1000) || (rule.frontendPort() == 1001 && rule.backendPort() == 1001)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099) || (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 8000 && lbNatRule.frontendPort() <= 8099) || (lbNatRule.frontendPort() >= 9000 && lbNatRule.frontendPort() <= 9099)); Assertions .assertTrue( lbNatRule.backendPort() == 23 || lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 44 || lbNatRule.backendPort() == 45); } } } Assertions.assertTrue(nicCount > 0); } /* * Previously name * canCreateTwoRegionalVirtualMachineScaleSetsAndAssociateEachWithDifferentBackendPoolOfZoneResilientLoadBalancer * but this was too long for some OSes and would cause git checkout to fail. 
*/ @Test public void canCreateTwoRegionalVMScaleSetsWithDifferentPoolOfZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); List<String> natpools = new ArrayList<>(); for (String natPool : publicLoadBalancer.inboundNatPools().keySet()) { natpools.add(natPool); } Assertions.assertTrue(natpools.size() == 2); final String vmssName1 = generateRandomResourceName("vmss1", 10); VirtualMachineScaleSet virtualMachineScaleSet1 = this .computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(0)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); final String vmssName2 = generateRandomResourceName("vmss2", 10); VirtualMachineScaleSet virtualMachineScaleSet2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(1)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(virtualMachineScaleSet1.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet1.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); Assertions.assertNull(virtualMachineScaleSet2.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet2.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); } @Test public void canCreateZoneRedundantVirtualMachineScaleSetWithZoneResilientLoadBalancer() throws Exception { Region region2 = 
Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); final String vmssName = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withAvailabilityZone(AvailabilityZoneId.ZONE_1) .withAvailabilityZone(AvailabilityZoneId.ZONE_2) .create(); Assertions.assertNotNull(virtualMachineScaleSet.availabilityZones()); Assertions.assertEquals(2, virtualMachineScaleSet.availabilityZones().size()); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithoutRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse( found, "Resource group should not have a role assignment with virtual machine scale set MSI principal"); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithMultipleRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("jvcsrg", 10)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachineScaleSet.managedServiceIdentityType()); Assertions .assertTrue( virtualMachineScaleSet.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Resource group should have a role assignment with virtual machine scale set MSI principal"); PagedIterable<RoleAssignment> stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment 
roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Storage account should have a role assignment with virtual machine scale set MSI principal"); } @Test public void canGetSingleVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = virtualMachineScaleSet.virtualMachines(); VirtualMachineScaleSetVM firstVm = virtualMachineScaleSetVMs.list().iterator().next(); VirtualMachineScaleSetVM fetchedVm = virtualMachineScaleSetVMs.getInstance(firstVm.instanceId()); this.checkVmsEqual(firstVm, fetchedVm); VirtualMachineScaleSetVM fetchedAsyncVm = virtualMachineScaleSetVMs.getInstanceAsync(firstVm.instanceId()).block(); this.checkVmsEqual(firstVm, fetchedAsyncVm); } @Test public void canCreateLowPriorityVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") 
.withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(-1.0) .create(); Assertions.assertEquals(vmss.virtualMachinePriority(), VirtualMachinePriorityTypes.LOW); Assertions.assertEquals(vmss.virtualMachineEvictionPolicy(), VirtualMachineEvictionPolicyTypes.DEALLOCATE); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) (-1.0)); vmss.update().withMaxPrice(2000.0).apply(); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) 2000.0); } @Test public void canPerformSimulateEvictionOnSpotVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups() .define(rgName) .withRegion(region) .create(); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .create(); PagedIterable<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(); for (VirtualMachineScaleSetVM instance: vmInstances) { Assertions.assertTrue(instance.osDiskSizeInGB() > 0); vmss.virtualMachines().simulateEviction(instance.instanceId()); } boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); deallocated = true; for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); if (instance.powerState() != PowerState.DEALLOCATED) { deallocated = false; } } if (deallocated) { break; } } Assertions.assertTrue(deallocated); for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); Assertions.assertEquals(0, instance.osDiskSizeInGB()); } } private void checkVmsEqual(VirtualMachineScaleSetVM original, VirtualMachineScaleSetVM fetched) { Assertions.assertEquals(original.administratorUserName(), fetched.administratorUserName()); Assertions.assertEquals(original.availabilitySetId(), fetched.availabilitySetId()); Assertions.assertEquals(original.bootDiagnosticEnabled(), fetched.bootDiagnosticEnabled()); Assertions.assertEquals(original.bootDiagnosticStorageAccountUri(), fetched.bootDiagnosticStorageAccountUri()); Assertions.assertEquals(original.computerName(), fetched.computerName()); Assertions.assertEquals(original.dataDisks().size(), fetched.dataDisks().size()); Assertions.assertEquals(original.extensions().size(), fetched.extensions().size()); 
Assertions.assertEquals(original.instanceId(), fetched.instanceId()); Assertions.assertEquals(original.isLatestScaleSetUpdateApplied(), fetched.isLatestScaleSetUpdateApplied()); Assertions.assertEquals(original.isLinuxPasswordAuthenticationEnabled(), fetched.isLinuxPasswordAuthenticationEnabled()); Assertions.assertEquals(original.isManagedDiskEnabled(), fetched.isManagedDiskEnabled()); Assertions.assertEquals(original.isOSBasedOnCustomImage(), fetched.isOSBasedOnCustomImage()); Assertions.assertEquals(original.isOSBasedOnPlatformImage(), fetched.isOSBasedOnPlatformImage()); Assertions.assertEquals(original.isOSBasedOnStoredImage(), fetched.isOSBasedOnStoredImage()); Assertions.assertEquals(original.isWindowsAutoUpdateEnabled(), fetched.isWindowsAutoUpdateEnabled()); Assertions.assertEquals(original.isWindowsVMAgentProvisioned(), original.isWindowsVMAgentProvisioned()); Assertions.assertEquals(original.networkInterfaceIds().size(), fetched.networkInterfaceIds().size()); Assertions.assertEquals(original.osDiskCachingType(), fetched.osDiskCachingType()); Assertions.assertEquals(original.osDiskId(), fetched.osDiskId()); Assertions.assertEquals(original.osDiskName(), fetched.osDiskName()); Assertions.assertEquals(original.osDiskSizeInGB(), fetched.osDiskSizeInGB()); Assertions.assertEquals(original.osType(), fetched.osType()); Assertions.assertEquals(original.osUnmanagedDiskVhdUri(), fetched.osUnmanagedDiskVhdUri()); Assertions.assertEquals(original.powerState(), fetched.powerState()); Assertions.assertEquals(original.primaryNetworkInterfaceId(), fetched.primaryNetworkInterfaceId()); Assertions.assertEquals(original.size(), fetched.size()); Assertions.assertEquals(original.sku().name(), fetched.sku().name()); Assertions.assertEquals(original.storedImageUnmanagedVhdUri(), fetched.storedImageUnmanagedVhdUri()); Assertions.assertEquals(original.unmanagedDataDisks().size(), fetched.unmanagedDataDisks().size()); Assertions.assertEquals(original.windowsTimeZone(), fetched.windowsTimeZone()); } private void checkVMInstances(VirtualMachineScaleSet vmScaleSet) { VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = vmScaleSet.virtualMachines(); PagedIterable<VirtualMachineScaleSetVM> virtualMachines = virtualMachineScaleSetVMs.list(); Assertions.assertEquals(TestUtilities.getSize(virtualMachines), vmScaleSet.capacity()); Assertions.assertTrue(TestUtilities.getSize(virtualMachines) > 0); virtualMachineScaleSetVMs.updateInstances(virtualMachines.iterator().next().instanceId()); for (VirtualMachineScaleSetVM vm : virtualMachines) { Assertions.assertNotNull(vm.size()); Assertions.assertEquals(vm.osType(), OperatingSystemTypes.LINUX); Assertions.assertNotNull(vm.computerName().startsWith(vmScaleSet.computerNamePrefix())); Assertions.assertTrue(vm.isOSBasedOnPlatformImage()); Assertions.assertNull(vm.osDiskId()); Assertions.assertNotNull(vm.osUnmanagedDiskVhdUri()); Assertions.assertNull(vm.storedImageUnmanagedVhdUri()); Assertions.assertFalse(vm.isWindowsAutoUpdateEnabled()); Assertions.assertFalse(vm.isWindowsVMAgentProvisioned()); Assertions.assertTrue(vm.administratorUserName().equalsIgnoreCase("jvuser")); VirtualMachineImage vmImage = vm.getOSPlatformImage(); Assertions.assertNotNull(vmImage); Assertions.assertEquals(vm.extensions().size(), vmScaleSet.extensions().size()); Assertions.assertNotNull(vm.powerState()); vm.refreshInstanceView(); } VirtualMachineScaleSetVM virtualMachineScaleSetVM = virtualMachines.iterator().next(); Assertions.assertNotNull(virtualMachineScaleSetVM); 
virtualMachineScaleSetVM.restart(); virtualMachineScaleSetVM.powerOff(); virtualMachineScaleSetVM.refreshInstanceView(); Assertions.assertEquals(virtualMachineScaleSetVM.powerState(), PowerState.STOPPED); virtualMachineScaleSetVM.start(); for (VirtualMachineScaleSetVM vm : virtualMachines) { PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = vmScaleSet.listNetworkInterfacesByInstanceId(vm.instanceId()); Assertions.assertNotNull(nics); Assertions.assertEquals(TestUtilities.getSize(nics), 1); VirtualMachineScaleSetNetworkInterface nic = nics.iterator().next(); Assertions.assertNotNull(nic.virtualMachineId()); Assertions.assertTrue(nic.virtualMachineId().toLowerCase().equalsIgnoreCase(vm.id())); Assertions.assertNotNull(vm.listNetworkInterfaces()); VirtualMachineScaleSetNetworkInterface nicA = vmScaleSet.getNetworkInterfaceByInstanceId(vm.instanceId(), nic.name()); Assertions.assertNotNull(nicA); VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name()); String nicIdB = vm.getNetworkInterfaceAsync(nic.name()).map(n -> nic.primaryIPConfiguration().networkId()).block(); Assertions.assertNotNull(nicB); Assertions.assertNotNull(nicIdB); } } @Test public void testVirtualMachineScaleSetSkuTypes() { rgName = null; VirtualMachineScaleSetSkuTypes skuType = VirtualMachineScaleSetSkuTypes.STANDARD_A0; Assertions.assertNull(skuType.sku().capacity()); Sku sku1 = skuType.sku(); Assertions.assertNull(sku1.capacity()); sku1.withCapacity(1L); Assertions.assertEquals(sku1.capacity().longValue(), 1); Assertions.assertNull(skuType.sku().capacity()); Sku sku2 = skuType.sku(); Assertions.assertNull(sku2.capacity()); sku2.withCapacity(2L); Assertions.assertEquals(sku2.capacity().longValue(), 2); Assertions.assertNull(skuType.sku().capacity()); Assertions.assertEquals(sku1.capacity().longValue(), 1); } @Test public void canDeleteVMSSInstance() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(4) .create(); Assertions.assertEquals(4, vmss.virtualMachines().list().stream().count()); List<String> firstTwoIds = vmss.virtualMachines().list().stream() .limit(2) .map(VirtualMachineScaleSetVM::instanceId) .collect(Collectors.toList()); vmss.virtualMachines().deleteInstances(firstTwoIds, true); Assertions.assertEquals(2, vmss.virtualMachines().list().stream().count()); vmss.virtualMachines().deleteInstances(Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); Assertions.assertEquals(1, vmss.virtualMachines().list().stream().count()); computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, 
Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); } }
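As a companion to canDeleteVMSSInstance above, a minimal sketch of the two instance-deletion entry points that test calls. It assumes the same vmss, computeManager, rgName and vmssName as the test; the boolean argument is passed exactly as the test passes it (true, then false, for the force-deletion flag), with no further claim about its server-side behaviour.

// Collect a couple of instance IDs from the scale set, as the test does.
List<String> firstTwoIds = vmss.virtualMachines().list().stream()
    .limit(2)
    .map(VirtualMachineScaleSetVM::instanceId)
    .collect(Collectors.toList());

// Delete through the scale set's own virtual-machine collection (force-deletion flag set).
vmss.virtualMachines().deleteInstances(firstTwoIds, true);

// Or delete through the top-level entry point, addressing the scale set by resource group and name.
String remainingId = vmss.virtualMachines().list().iterator().next().instanceId();
computeManager.virtualMachineScaleSets()
    .deleteInstances(rgName, vmssName, Collections.singleton(remainingId), false);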
Added a one-line test.
private void checkVMInstances(VirtualMachineScaleSet vmScaleSet) { VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = vmScaleSet.virtualMachines(); PagedIterable<VirtualMachineScaleSetVM> virtualMachines = virtualMachineScaleSetVMs.list(); Assertions.assertEquals(TestUtilities.getSize(virtualMachines), vmScaleSet.capacity()); Assertions.assertTrue(TestUtilities.getSize(virtualMachines) > 0); virtualMachineScaleSetVMs.updateInstances(virtualMachines.iterator().next().instanceId()); for (VirtualMachineScaleSetVM vm : virtualMachines) { Assertions.assertNotNull(vm.size()); Assertions.assertEquals(vm.osType(), OperatingSystemTypes.LINUX); Assertions.assertNotNull(vm.computerName().startsWith(vmScaleSet.computerNamePrefix())); Assertions.assertTrue(vm.isOSBasedOnPlatformImage()); Assertions.assertNull(vm.osDiskId()); Assertions.assertNotNull(vm.osUnmanagedDiskVhdUri()); Assertions.assertNull(vm.storedImageUnmanagedVhdUri()); Assertions.assertFalse(vm.isWindowsAutoUpdateEnabled()); Assertions.assertFalse(vm.isWindowsVMAgentProvisioned()); Assertions.assertTrue(vm.administratorUserName().equalsIgnoreCase("jvuser")); VirtualMachineImage vmImage = vm.getOSPlatformImage(); Assertions.assertNotNull(vmImage); Assertions.assertEquals(vm.extensions().size(), vmScaleSet.extensions().size()); Assertions.assertNotNull(vm.powerState()); vm.refreshInstanceView(); } VirtualMachineScaleSetVM virtualMachineScaleSetVM = virtualMachines.iterator().next(); Assertions.assertNotNull(virtualMachineScaleSetVM); virtualMachineScaleSetVM.restart(); virtualMachineScaleSetVM.powerOff(); virtualMachineScaleSetVM.refreshInstanceView(); Assertions.assertEquals(virtualMachineScaleSetVM.powerState(), PowerState.STOPPED); virtualMachineScaleSetVM.start(); for (VirtualMachineScaleSetVM vm : virtualMachines) { PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = vmScaleSet.listNetworkInterfacesByInstanceId(vm.instanceId()); Assertions.assertNotNull(nics); Assertions.assertEquals(TestUtilities.getSize(nics), 1); VirtualMachineScaleSetNetworkInterface nic = nics.iterator().next(); Assertions.assertNotNull(nic.virtualMachineId()); Assertions.assertTrue(nic.virtualMachineId().toLowerCase().equalsIgnoreCase(vm.id())); Assertions.assertNotNull(vm.listNetworkInterfaces()); VirtualMachineScaleSetNetworkInterface nicA = vmScaleSet.getNetworkInterfaceByInstanceId(vm.instanceId(), nic.name()); Assertions.assertNotNull(nicA); VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name()); String nicIdB = vm.getNetworkInterfaceAsync(nic.name()).map(n -> nic.primaryIPConfiguration().networkId()).block(); Assertions.assertNotNull(nicB); Assertions.assertNotNull(nicIdB); } }
Assertions.assertNotNull(nicIdB);
private void checkVMInstances(VirtualMachineScaleSet vmScaleSet) { VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = vmScaleSet.virtualMachines(); PagedIterable<VirtualMachineScaleSetVM> virtualMachines = virtualMachineScaleSetVMs.list(); Assertions.assertEquals(TestUtilities.getSize(virtualMachines), vmScaleSet.capacity()); Assertions.assertTrue(TestUtilities.getSize(virtualMachines) > 0); virtualMachineScaleSetVMs.updateInstances(virtualMachines.iterator().next().instanceId()); for (VirtualMachineScaleSetVM vm : virtualMachines) { Assertions.assertNotNull(vm.size()); Assertions.assertEquals(vm.osType(), OperatingSystemTypes.LINUX); Assertions.assertNotNull(vm.computerName().startsWith(vmScaleSet.computerNamePrefix())); Assertions.assertTrue(vm.isOSBasedOnPlatformImage()); Assertions.assertNull(vm.osDiskId()); Assertions.assertNotNull(vm.osUnmanagedDiskVhdUri()); Assertions.assertNull(vm.storedImageUnmanagedVhdUri()); Assertions.assertFalse(vm.isWindowsAutoUpdateEnabled()); Assertions.assertFalse(vm.isWindowsVMAgentProvisioned()); Assertions.assertTrue(vm.administratorUserName().equalsIgnoreCase("jvuser")); VirtualMachineImage vmImage = vm.getOSPlatformImage(); Assertions.assertNotNull(vmImage); Assertions.assertEquals(vm.extensions().size(), vmScaleSet.extensions().size()); Assertions.assertNotNull(vm.powerState()); vm.refreshInstanceView(); } VirtualMachineScaleSetVM virtualMachineScaleSetVM = virtualMachines.iterator().next(); Assertions.assertNotNull(virtualMachineScaleSetVM); virtualMachineScaleSetVM.restart(); virtualMachineScaleSetVM.powerOff(); virtualMachineScaleSetVM.refreshInstanceView(); Assertions.assertEquals(virtualMachineScaleSetVM.powerState(), PowerState.STOPPED); virtualMachineScaleSetVM.start(); for (VirtualMachineScaleSetVM vm : virtualMachines) { PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = vmScaleSet.listNetworkInterfacesByInstanceId(vm.instanceId()); Assertions.assertNotNull(nics); Assertions.assertEquals(TestUtilities.getSize(nics), 1); VirtualMachineScaleSetNetworkInterface nic = nics.iterator().next(); Assertions.assertNotNull(nic.virtualMachineId()); Assertions.assertTrue(nic.virtualMachineId().toLowerCase().equalsIgnoreCase(vm.id())); Assertions.assertNotNull(vm.listNetworkInterfaces()); VirtualMachineScaleSetNetworkInterface nicA = vmScaleSet.getNetworkInterfaceByInstanceId(vm.instanceId(), nic.name()); Assertions.assertNotNull(nicA); VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name()); String nicIdB = vm.getNetworkInterfaceAsync(nic.name()).map(n -> nic.primaryIPConfiguration().networkId()).block(); Assertions.assertNotNull(nicB); Assertions.assertNotNull(nicIdB); } }
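The target line above is the assertion this row's comment points at. In checkVMInstances it sits next to the NIC lookups that resolve the same interface both synchronously and through the reactive API; a minimal sketch of just that fragment follows, assuming the vm and nic locals from the method body.

// Resolve the NIC synchronously, then again via the async API; note that the map() lambda
// reads the outer nic's primary IP configuration, exactly as written in the method body.
VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name());
String nicIdB = vm.getNetworkInterfaceAsync(nic.name())
    .map(n -> nic.primaryIPConfiguration().networkId())
    .block();

Assertions.assertNotNull(nicB);
Assertions.assertNotNull(nicIdB);   // the assertion captured as this row's target line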
class VirtualMachineScaleSetOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_WEST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVMSSWithPlan() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); final String uname = "jvuser"; Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); PurchasePlan plan = new PurchasePlan() .withName("access_server_byol") .withPublisher("openvpn") .withProduct("openvpnas"); ImageReference imageReference = new ImageReference() .withPublisher("openvpn") .withOffer("openvpnas") .withSku("access_server_byol") .withVersion("latest"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withSpecificLinuxImageVersion(imageReference) .withRootUsername(uname) .withSsh(sshPublicKey()) .withNewDataDisk(1) .withPlan(plan) .create(); VirtualMachineScaleSet currentVirtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertEquals("access_server_byol", currentVirtualMachineScaleSet.plan().name()); Assertions.assertEquals("openvpn", currentVirtualMachineScaleSet.plan().publisher()); Assertions.assertEquals("openvpnas", currentVirtualMachineScaleSet.plan().product()); } @Test public void canUpdateVirtualMachineScaleSetWithExtensionProtectedSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("stg", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); List<StorageAccountKey> keys = storageAccount.getKeys(); Assertions.assertNotNull(keys); Assertions.assertTrue(keys.size() > 0); String storageAccountKey = keys.get(0).value(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withExistingStorageAccount(storageAccount) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .withProtectedSetting("storageAccountName", storageAccount.name()) .withProtectedSetting("storageAccountKey", storageAccountKey) .attach() .create(); Map<String, VirtualMachineScaleSetExtension> extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); VirtualMachineScaleSetExtension extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); VirtualMachineScaleSet scaleSet = this.computeManager.virtualMachineScaleSets().getById(virtualMachineScaleSet.id()); extensions = scaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); int newCapacity = scaleSet.capacity() + 1; virtualMachineScaleSet.update().withCapacity(newCapacity).apply(); extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); } @Test public void canCreateVirtualMachineScaleSetWithCustomScriptExtension() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) 
.withNewStorageAccount(generateRandomResourceName("stg", 15)) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .attach() .withUpgradeMode(UpgradeMode.MANUAL) .create(); checkVMInstances(virtualMachineScaleSet); List<String> publicIPAddressIds = virtualMachineScaleSet.primaryPublicIpAddressIds(); PublicIpAddress publicIPAddress = this.networkManager.publicIpAddresses().getById(publicIPAddressIds.get(0)); String fqdn = publicIPAddress.fqdn(); Assertions.assertNotNull(fqdn); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { PagedIterable<VirtualMachineScaleSetNetworkInterface> networkInterfaces = vm.listNetworkInterfaces(); Assertions.assertEquals(TestUtilities.getSize(networkInterfaces), 1); VirtualMachineScaleSetNetworkInterface networkInterface = networkInterfaces.iterator().next(); VirtualMachineScaleSetNicIpConfiguration primaryIpConfig = null; primaryIpConfig = networkInterface.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Integer sshFrontendPort = null; List<LoadBalancerInboundNatRule> natRules = primaryIpConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule natRule : natRules) { if (natRule.backendPort() == 22) { sshFrontendPort = natRule.frontendPort(); break; } } Assertions.assertNotNull(sshFrontendPort); } } @Test public void canCreateVirtualMachineScaleSetWithOptionalNetworkSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); final String nsgName = generateRandomResourceName("nsg", 10); final String asgName = generateRandomResourceName("asg", 8); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ApplicationSecurityGroup asg = this .networkManager .applicationSecurityGroups() .define(asgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withVirtualMachinePublicIp(vmssVmDnsLabel) .withExistingApplicationSecurityGroup(asg) .create(); VirtualMachineScaleSetPublicIpAddressConfiguration currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.dnsSettings()); Assertions.assertNotNull(currentIpConfig.dnsSettings().domainNameLabel()); currentIpConfig.withIdleTimeoutInMinutes(20); virtualMachineScaleSet.update().withVirtualMachinePublicIp(currentIpConfig).apply(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); 
Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); virtualMachineScaleSet.refresh(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); List<String> asgIds = virtualMachineScaleSet.applicationSecurityGroupIds(); Assertions.assertNotNull(asgIds); Assertions.assertEquals(1, asgIds.size()); NetworkSecurityGroup nsg = networkManager .networkSecurityGroups() .define(nsgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineRule("rule1") .allowOutbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); virtualMachineScaleSet.deallocate(); virtualMachineScaleSet .update() .withIpForwarding() .withAcceleratedNetworking() .withExistingNetworkSecurityGroup(nsg) .apply(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet.refresh(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet .update() .withoutIpForwarding() .withoutAcceleratedNetworking() .withoutNetworkSecurityGroup() .apply(); Assertions.assertFalse(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertFalse(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNull(virtualMachineScaleSet.networkSecurityGroupId()); } @Test @Disabled("Mock framework doesn't support data plane") public void canCreateVirtualMachineScaleSetWithSecret() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vaultName = generateRandomResourceName("vlt", 10); final String secretName = generateRandomResourceName("srt", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); Vault vault = this .keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowSecretAllPermissions() .attach() .withDeploymentEnabled() .create(); final InputStream embeddedJsonConfig = VirtualMachineExtensionOperationsTests.class.getResourceAsStream("/myTest.txt"); String secretValue = IOUtils.toString(embeddedJsonConfig, StandardCharsets.UTF_8); Secret secret = vault.secrets().define(secretName).withValue(secretValue).create(); List<VaultCertificate> certs = new ArrayList<>(); certs.add(new VaultCertificate().withCertificateUrl(secret.id())); List<VaultSecretGroup> group = new ArrayList<>(); group .add( new 
VaultSecretGroup() .withSourceVault(new SubResource().withId(vault.id())) .withVaultCertificates(certs)); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSecrets(group) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() > 0); } virtualMachineScaleSet.update().withoutSecrets().apply(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() == 0); } } public void canCreateVirtualMachineScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .create(); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = 
virtualMachineScaleSet.listNetworkInterfaces(); int nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Assertions.assertNotNull(nic.id()); Assertions .assertTrue(nic.virtualMachineId().toLowerCase().startsWith(virtualMachineScaleSet.id().toLowerCase())); Assertions.assertNotNull(nic.macAddress()); Assertions.assertNotNull(nic.dnsServers()); Assertions.assertNotNull(nic.appliedDnsServers()); Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); Assertions.assertTrue(ipConfig.isPrimary()); Assertions.assertNotNull(ipConfig.subnetName()); Assertions.assertTrue(primaryNetwork.id().toLowerCase().equalsIgnoreCase(ipConfig.networkId())); Assertions.assertNotNull(ipConfig.privateIpAddress()); Assertions.assertNotNull(ipConfig.privateIpAddressVersion()); Assertions.assertNotNull(ipConfig.privateIpAllocationMethod()); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertEquals(lbBackends.size(), 2); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); Assertions.assertEquals(lbNatRules.size(), 2); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099)); Assertions.assertTrue(lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 23); } } } Assertions.assertTrue(nicCount > 0); Assertions.assertEquals(virtualMachineScaleSet.vhdContainers().size(), 2); Assertions.assertEquals(virtualMachineScaleSet.sku(), VirtualMachineScaleSetSkuTypes.STANDARD_A0); Assertions.assertTrue(virtualMachineScaleSet.upgradeModel() == UpgradeMode.AUTOMATIC); Assertions.assertEquals(virtualMachineScaleSet.capacity(), 2); primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); String inboundNatPoolToRemove = null; for (String inboundNatPoolName : virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().keySet()) { inboundNatPoolToRemove = inboundNatPoolName; break; } LoadBalancer internalLoadBalancer = createInternalLoadBalancer(region, resourceGroup, primaryNetwork, "1"); virtualMachineScaleSet .update() .withExistingPrimaryInternalLoadBalancer(internalLoadBalancer) .withoutPrimaryInternetFacingLoadBalancerNatPools(inboundNatPoolToRemove) .apply(); virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 1); 
Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 2); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 2); nics = virtualMachineScaleSet.listNetworkInterfaces(); nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertNotNull(lbBackends); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443) || (rule.frontendPort() == 1000 && rule.backendPort() == 1000) || (rule.frontendPort() == 1001 && rule.backendPort() == 1001)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099) || (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 8000 && lbNatRule.frontendPort() <= 8099) || (lbNatRule.frontendPort() >= 9000 && lbNatRule.frontendPort() <= 9099)); Assertions .assertTrue( lbNatRule.backendPort() == 23 || lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 44 || lbNatRule.backendPort() == 45); } } } Assertions.assertTrue(nicCount > 0); } /* * Previously name * canCreateTwoRegionalVirtualMachineScaleSetsAndAssociateEachWithDifferentBackendPoolOfZoneResilientLoadBalancer * but this was too long for some OSes and would cause git checkout to fail. 
*/ @Test public void canCreateTwoRegionalVMScaleSetsWithDifferentPoolOfZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); List<String> natpools = new ArrayList<>(); for (String natPool : publicLoadBalancer.inboundNatPools().keySet()) { natpools.add(natPool); } Assertions.assertTrue(natpools.size() == 2); final String vmssName1 = generateRandomResourceName("vmss1", 10); VirtualMachineScaleSet virtualMachineScaleSet1 = this .computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(0)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); final String vmssName2 = generateRandomResourceName("vmss2", 10); VirtualMachineScaleSet virtualMachineScaleSet2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(1)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(virtualMachineScaleSet1.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet1.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); Assertions.assertNull(virtualMachineScaleSet2.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet2.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); } @Test public void canCreateZoneRedundantVirtualMachineScaleSetWithZoneResilientLoadBalancer() throws Exception { Region region2 = 
Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); final String vmssName = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withAvailabilityZone(AvailabilityZoneId.ZONE_1) .withAvailabilityZone(AvailabilityZoneId.ZONE_2) .create(); Assertions.assertNotNull(virtualMachineScaleSet.availabilityZones()); Assertions.assertEquals(2, virtualMachineScaleSet.availabilityZones().size()); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithoutRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse( found, "Resource group should not have a role assignment with virtual machine scale set MSI principal"); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithMultipleRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("jvcsrg", 10)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachineScaleSet.managedServiceIdentityType()); Assertions .assertTrue( virtualMachineScaleSet.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Resource group should have a role assignment with virtual machine scale set MSI principal"); PagedIterable<RoleAssignment> stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment 
roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Storage account should have a role assignment with virtual machine scale set MSI principal"); } @Test public void canGetSingleVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = virtualMachineScaleSet.virtualMachines(); VirtualMachineScaleSetVM firstVm = virtualMachineScaleSetVMs.list().iterator().next(); VirtualMachineScaleSetVM fetchedVm = virtualMachineScaleSetVMs.getInstance(firstVm.instanceId()); this.checkVmsEqual(firstVm, fetchedVm); VirtualMachineScaleSetVM fetchedAsyncVm = virtualMachineScaleSetVMs.getInstanceAsync(firstVm.instanceId()).block(); this.checkVmsEqual(firstVm, fetchedAsyncVm); } @Test public void canCreateLowPriorityVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") 
.withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(-1.0) .create(); Assertions.assertEquals(vmss.virtualMachinePriority(), VirtualMachinePriorityTypes.LOW); Assertions.assertEquals(vmss.virtualMachineEvictionPolicy(), VirtualMachineEvictionPolicyTypes.DEALLOCATE); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) (-1.0)); vmss.update().withMaxPrice(2000.0).apply(); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) 2000.0); } @Test public void canPerformSimulateEvictionOnSpotVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups() .define(rgName) .withRegion(region) .create(); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .create(); PagedIterable<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(); for (VirtualMachineScaleSetVM instance: vmInstances) { Assertions.assertTrue(instance.osDiskSizeInGB() > 0); vmss.virtualMachines().simulateEviction(instance.instanceId()); } boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); deallocated = true; for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); if (instance.powerState() != PowerState.DEALLOCATED) { deallocated = false; } } if (deallocated) { break; } } Assertions.assertTrue(deallocated); for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); Assertions.assertEquals(0, instance.osDiskSizeInGB()); } } private void checkVmsEqual(VirtualMachineScaleSetVM original, VirtualMachineScaleSetVM fetched) { Assertions.assertEquals(original.administratorUserName(), fetched.administratorUserName()); Assertions.assertEquals(original.availabilitySetId(), fetched.availabilitySetId()); Assertions.assertEquals(original.bootDiagnosticEnabled(), fetched.bootDiagnosticEnabled()); Assertions.assertEquals(original.bootDiagnosticStorageAccountUri(), fetched.bootDiagnosticStorageAccountUri()); Assertions.assertEquals(original.computerName(), fetched.computerName()); Assertions.assertEquals(original.dataDisks().size(), fetched.dataDisks().size()); Assertions.assertEquals(original.extensions().size(), fetched.extensions().size()); 
Assertions.assertEquals(original.instanceId(), fetched.instanceId()); Assertions.assertEquals(original.isLatestScaleSetUpdateApplied(), fetched.isLatestScaleSetUpdateApplied()); Assertions.assertEquals(original.isLinuxPasswordAuthenticationEnabled(), fetched.isLinuxPasswordAuthenticationEnabled()); Assertions.assertEquals(original.isManagedDiskEnabled(), fetched.isManagedDiskEnabled()); Assertions.assertEquals(original.isOSBasedOnCustomImage(), fetched.isOSBasedOnCustomImage()); Assertions.assertEquals(original.isOSBasedOnPlatformImage(), fetched.isOSBasedOnPlatformImage()); Assertions.assertEquals(original.isOSBasedOnStoredImage(), fetched.isOSBasedOnStoredImage()); Assertions.assertEquals(original.isWindowsAutoUpdateEnabled(), fetched.isWindowsAutoUpdateEnabled()); Assertions.assertEquals(original.isWindowsVMAgentProvisioned(), fetched.isWindowsVMAgentProvisioned()); Assertions.assertEquals(original.networkInterfaceIds().size(), fetched.networkInterfaceIds().size()); Assertions.assertEquals(original.osDiskCachingType(), fetched.osDiskCachingType()); Assertions.assertEquals(original.osDiskId(), fetched.osDiskId()); Assertions.assertEquals(original.osDiskName(), fetched.osDiskName()); Assertions.assertEquals(original.osDiskSizeInGB(), fetched.osDiskSizeInGB()); Assertions.assertEquals(original.osType(), fetched.osType()); Assertions.assertEquals(original.osUnmanagedDiskVhdUri(), fetched.osUnmanagedDiskVhdUri()); Assertions.assertEquals(original.powerState(), fetched.powerState()); Assertions.assertEquals(original.primaryNetworkInterfaceId(), fetched.primaryNetworkInterfaceId()); Assertions.assertEquals(original.size(), fetched.size()); Assertions.assertEquals(original.sku().name(), fetched.sku().name()); Assertions.assertEquals(original.storedImageUnmanagedVhdUri(), fetched.storedImageUnmanagedVhdUri()); Assertions.assertEquals(original.unmanagedDataDisks().size(), fetched.unmanagedDataDisks().size()); Assertions.assertEquals(original.windowsTimeZone(), fetched.windowsTimeZone()); } @Test public void testVirtualMachineScaleSetSkuTypes() { rgName = null; VirtualMachineScaleSetSkuTypes skuType = VirtualMachineScaleSetSkuTypes.STANDARD_A0; Assertions.assertNull(skuType.sku().capacity()); Sku sku1 = skuType.sku(); Assertions.assertNull(sku1.capacity()); sku1.withCapacity(1L); Assertions.assertEquals(sku1.capacity().longValue(), 1); Assertions.assertNull(skuType.sku().capacity()); Sku sku2 = skuType.sku(); Assertions.assertNull(sku2.capacity()); sku2.withCapacity(2L); Assertions.assertEquals(sku2.capacity().longValue(), 2); Assertions.assertNull(skuType.sku().capacity()); Assertions.assertEquals(sku1.capacity().longValue(), 1); } @Test public void canDeleteVMSSInstance() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(4) .create(); Assertions.assertEquals(4, vmss.virtualMachines().list().stream().count()); List<String> firstTwoIds = vmss.virtualMachines().list().stream() .limit(2) .map(VirtualMachineScaleSetVM::instanceId) .collect(Collectors.toList()); vmss.virtualMachines().deleteInstances(firstTwoIds, true); Assertions.assertEquals(2, vmss.virtualMachines().list().stream().count()); vmss.virtualMachines().deleteInstances(Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); Assertions.assertEquals(1, vmss.virtualMachines().list().stream().count()); computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); } }
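For quick reference, the two instance-deletion entry points exercised at the end of canDeleteVMSSInstance above can be condensed into the following sketch. It assumes the same vmss, computeManager, rgName, and vmssName variables from that test and only restates calls that already appear there; the boolean argument is understood here as the force-deletion flag, which requests a faster, non-graceful delete.

// Delete two instances through the scale set's own virtualMachines() collection (force deletion = true).
List<String> ids = vmss.virtualMachines().list().stream()
    .limit(2)
    .map(VirtualMachineScaleSetVM::instanceId)
    .collect(Collectors.toList());
vmss.virtualMachines().deleteInstances(ids, true);

// Delete one more instance through the manager-level entry point (force deletion = false).
String remainingId = vmss.virtualMachines().list().stream().findFirst().get().instanceId();
computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, Collections.singleton(remainingId), false);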
class VirtualMachineScaleSetOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_WEST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVMSSWithPlan() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); final String uname = "jvuser"; Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); PurchasePlan plan = new PurchasePlan() .withName("access_server_byol") .withPublisher("openvpn") .withProduct("openvpnas"); ImageReference imageReference = new ImageReference() .withPublisher("openvpn") .withOffer("openvpnas") .withSku("access_server_byol") .withVersion("latest"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withSpecificLinuxImageVersion(imageReference) .withRootUsername(uname) .withSsh(sshPublicKey()) .withNewDataDisk(1) .withPlan(plan) .create(); VirtualMachineScaleSet currentVirtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertEquals("access_server_byol", currentVirtualMachineScaleSet.plan().name()); Assertions.assertEquals("openvpn", currentVirtualMachineScaleSet.plan().publisher()); Assertions.assertEquals("openvpnas", currentVirtualMachineScaleSet.plan().product()); } @Test public void canUpdateVirtualMachineScaleSetWithExtensionProtectedSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("stg", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); List<StorageAccountKey> keys = storageAccount.getKeys(); Assertions.assertNotNull(keys); Assertions.assertTrue(keys.size() > 0); String storageAccountKey = keys.get(0).value(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withExistingStorageAccount(storageAccount) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .withProtectedSetting("storageAccountName", storageAccount.name()) .withProtectedSetting("storageAccountKey", storageAccountKey) .attach() .create(); Map<String, VirtualMachineScaleSetExtension> extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); VirtualMachineScaleSetExtension extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); VirtualMachineScaleSet scaleSet = this.computeManager.virtualMachineScaleSets().getById(virtualMachineScaleSet.id()); extensions = scaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); int newCapacity = scaleSet.capacity() + 1; virtualMachineScaleSet.update().withCapacity(newCapacity).apply(); extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); } @Test public void canCreateVirtualMachineScaleSetWithCustomScriptExtension() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) 
.withNewStorageAccount(generateRandomResourceName("stg", 15)) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .attach() .withUpgradeMode(UpgradeMode.MANUAL) .create(); checkVMInstances(virtualMachineScaleSet); List<String> publicIPAddressIds = virtualMachineScaleSet.primaryPublicIpAddressIds(); PublicIpAddress publicIPAddress = this.networkManager.publicIpAddresses().getById(publicIPAddressIds.get(0)); String fqdn = publicIPAddress.fqdn(); Assertions.assertNotNull(fqdn); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { PagedIterable<VirtualMachineScaleSetNetworkInterface> networkInterfaces = vm.listNetworkInterfaces(); Assertions.assertEquals(TestUtilities.getSize(networkInterfaces), 1); VirtualMachineScaleSetNetworkInterface networkInterface = networkInterfaces.iterator().next(); VirtualMachineScaleSetNicIpConfiguration primaryIpConfig = null; primaryIpConfig = networkInterface.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Integer sshFrontendPort = null; List<LoadBalancerInboundNatRule> natRules = primaryIpConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule natRule : natRules) { if (natRule.backendPort() == 22) { sshFrontendPort = natRule.frontendPort(); break; } } Assertions.assertNotNull(sshFrontendPort); } } @Test public void canCreateVirtualMachineScaleSetWithOptionalNetworkSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); final String nsgName = generateRandomResourceName("nsg", 10); final String asgName = generateRandomResourceName("asg", 8); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ApplicationSecurityGroup asg = this .networkManager .applicationSecurityGroups() .define(asgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withVirtualMachinePublicIp(vmssVmDnsLabel) .withExistingApplicationSecurityGroup(asg) .create(); VirtualMachineScaleSetPublicIpAddressConfiguration currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.dnsSettings()); Assertions.assertNotNull(currentIpConfig.dnsSettings().domainNameLabel()); currentIpConfig.withIdleTimeoutInMinutes(20); virtualMachineScaleSet.update().withVirtualMachinePublicIp(currentIpConfig).apply(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); 
Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); virtualMachineScaleSet.refresh(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); List<String> asgIds = virtualMachineScaleSet.applicationSecurityGroupIds(); Assertions.assertNotNull(asgIds); Assertions.assertEquals(1, asgIds.size()); NetworkSecurityGroup nsg = networkManager .networkSecurityGroups() .define(nsgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineRule("rule1") .allowOutbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); virtualMachineScaleSet.deallocate(); virtualMachineScaleSet .update() .withIpForwarding() .withAcceleratedNetworking() .withExistingNetworkSecurityGroup(nsg) .apply(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet.refresh(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet .update() .withoutIpForwarding() .withoutAcceleratedNetworking() .withoutNetworkSecurityGroup() .apply(); Assertions.assertFalse(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertFalse(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNull(virtualMachineScaleSet.networkSecurityGroupId()); } @Test @Disabled("Mock framework doesn't support data plane") public void canCreateVirtualMachineScaleSetWithSecret() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vaultName = generateRandomResourceName("vlt", 10); final String secretName = generateRandomResourceName("srt", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); Vault vault = this .keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowSecretAllPermissions() .attach() .withDeploymentEnabled() .create(); final InputStream embeddedJsonConfig = VirtualMachineExtensionOperationsTests.class.getResourceAsStream("/myTest.txt"); String secretValue = IOUtils.toString(embeddedJsonConfig, StandardCharsets.UTF_8); Secret secret = vault.secrets().define(secretName).withValue(secretValue).create(); List<VaultCertificate> certs = new ArrayList<>(); certs.add(new VaultCertificate().withCertificateUrl(secret.id())); List<VaultSecretGroup> group = new ArrayList<>(); group .add( new 
VaultSecretGroup() .withSourceVault(new SubResource().withId(vault.id())) .withVaultCertificates(certs)); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSecrets(group) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() > 0); } virtualMachineScaleSet.update().withoutSecrets().apply(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() == 0); } } public void canCreateVirtualMachineScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .create(); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = 
virtualMachineScaleSet.listNetworkInterfaces(); int nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Assertions.assertNotNull(nic.id()); Assertions .assertTrue(nic.virtualMachineId().toLowerCase().startsWith(virtualMachineScaleSet.id().toLowerCase())); Assertions.assertNotNull(nic.macAddress()); Assertions.assertNotNull(nic.dnsServers()); Assertions.assertNotNull(nic.appliedDnsServers()); Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); Assertions.assertTrue(ipConfig.isPrimary()); Assertions.assertNotNull(ipConfig.subnetName()); Assertions.assertTrue(primaryNetwork.id().toLowerCase().equalsIgnoreCase(ipConfig.networkId())); Assertions.assertNotNull(ipConfig.privateIpAddress()); Assertions.assertNotNull(ipConfig.privateIpAddressVersion()); Assertions.assertNotNull(ipConfig.privateIpAllocationMethod()); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertEquals(lbBackends.size(), 2); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); Assertions.assertEquals(lbNatRules.size(), 2); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099)); Assertions.assertTrue(lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 23); } } } Assertions.assertTrue(nicCount > 0); Assertions.assertEquals(virtualMachineScaleSet.vhdContainers().size(), 2); Assertions.assertEquals(virtualMachineScaleSet.sku(), VirtualMachineScaleSetSkuTypes.STANDARD_A0); Assertions.assertTrue(virtualMachineScaleSet.upgradeModel() == UpgradeMode.AUTOMATIC); Assertions.assertEquals(virtualMachineScaleSet.capacity(), 2); primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); String inboundNatPoolToRemove = null; for (String inboundNatPoolName : virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().keySet()) { inboundNatPoolToRemove = inboundNatPoolName; break; } LoadBalancer internalLoadBalancer = createInternalLoadBalancer(region, resourceGroup, primaryNetwork, "1"); virtualMachineScaleSet .update() .withExistingPrimaryInternalLoadBalancer(internalLoadBalancer) .withoutPrimaryInternetFacingLoadBalancerNatPools(inboundNatPoolToRemove) .apply(); virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 1); 
Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 2); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 2); nics = virtualMachineScaleSet.listNetworkInterfaces(); nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertNotNull(lbBackends); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443) || (rule.frontendPort() == 1000 && rule.backendPort() == 1000) || (rule.frontendPort() == 1001 && rule.backendPort() == 1001)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099) || (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 8000 && lbNatRule.frontendPort() <= 8099) || (lbNatRule.frontendPort() >= 9000 && lbNatRule.frontendPort() <= 9099)); Assertions .assertTrue( lbNatRule.backendPort() == 23 || lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 44 || lbNatRule.backendPort() == 45); } } } Assertions.assertTrue(nicCount > 0); } /* * Previously name * canCreateTwoRegionalVirtualMachineScaleSetsAndAssociateEachWithDifferentBackendPoolOfZoneResilientLoadBalancer * but this was too long for some OSes and would cause git checkout to fail. 
*/ @Test public void canCreateTwoRegionalVMScaleSetsWithDifferentPoolOfZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); List<String> natpools = new ArrayList<>(); for (String natPool : publicLoadBalancer.inboundNatPools().keySet()) { natpools.add(natPool); } Assertions.assertTrue(natpools.size() == 2); final String vmssName1 = generateRandomResourceName("vmss1", 10); VirtualMachineScaleSet virtualMachineScaleSet1 = this .computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(0)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); final String vmssName2 = generateRandomResourceName("vmss2", 10); VirtualMachineScaleSet virtualMachineScaleSet2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(1)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(virtualMachineScaleSet1.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet1.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); Assertions.assertNull(virtualMachineScaleSet2.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet2.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); } @Test public void canCreateZoneRedundantVirtualMachineScaleSetWithZoneResilientLoadBalancer() throws Exception { Region region2 = 
Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); final String vmssName = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withAvailabilityZone(AvailabilityZoneId.ZONE_1) .withAvailabilityZone(AvailabilityZoneId.ZONE_2) .create(); Assertions.assertNotNull(virtualMachineScaleSet.availabilityZones()); Assertions.assertEquals(2, virtualMachineScaleSet.availabilityZones().size()); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithoutRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse( found, "Resource group should not have a role assignment with virtual machine scale set MSI principal"); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithMultipleRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("jvcsrg", 10)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachineScaleSet.managedServiceIdentityType()); Assertions .assertTrue( virtualMachineScaleSet.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Resource group should have a role assignment with virtual machine scale set MSI principal"); PagedIterable<RoleAssignment> stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment 
roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Storage account should have a role assignment with virtual machine scale set MSI principal"); } @Test public void canGetSingleVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = virtualMachineScaleSet.virtualMachines(); VirtualMachineScaleSetVM firstVm = virtualMachineScaleSetVMs.list().iterator().next(); VirtualMachineScaleSetVM fetchedVm = virtualMachineScaleSetVMs.getInstance(firstVm.instanceId()); this.checkVmsEqual(firstVm, fetchedVm); VirtualMachineScaleSetVM fetchedAsyncVm = virtualMachineScaleSetVMs.getInstanceAsync(firstVm.instanceId()).block(); this.checkVmsEqual(firstVm, fetchedAsyncVm); } @Test public void canCreateLowPriorityVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") 
.withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(-1.0) .create(); Assertions.assertEquals(vmss.virtualMachinePriority(), VirtualMachinePriorityTypes.LOW); Assertions.assertEquals(vmss.virtualMachineEvictionPolicy(), VirtualMachineEvictionPolicyTypes.DEALLOCATE); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) (-1.0)); vmss.update().withMaxPrice(2000.0).apply(); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) 2000.0); } @Test public void canPerformSimulateEvictionOnSpotVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups() .define(rgName) .withRegion(region) .create(); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .create(); PagedIterable<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(); for (VirtualMachineScaleSetVM instance: vmInstances) { Assertions.assertTrue(instance.osDiskSizeInGB() > 0); vmss.virtualMachines().simulateEviction(instance.instanceId()); } boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); deallocated = true; for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); if (instance.powerState() != PowerState.DEALLOCATED) { deallocated = false; } } if (deallocated) { break; } } Assertions.assertTrue(deallocated); for (VirtualMachineScaleSetVM instance: vmInstances) { instance.refresh(); Assertions.assertEquals(0, instance.osDiskSizeInGB()); } } private void checkVmsEqual(VirtualMachineScaleSetVM original, VirtualMachineScaleSetVM fetched) { Assertions.assertEquals(original.administratorUserName(), fetched.administratorUserName()); Assertions.assertEquals(original.availabilitySetId(), fetched.availabilitySetId()); Assertions.assertEquals(original.bootDiagnosticEnabled(), fetched.bootDiagnosticEnabled()); Assertions.assertEquals(original.bootDiagnosticStorageAccountUri(), fetched.bootDiagnosticStorageAccountUri()); Assertions.assertEquals(original.computerName(), fetched.computerName()); Assertions.assertEquals(original.dataDisks().size(), fetched.dataDisks().size()); Assertions.assertEquals(original.extensions().size(), fetched.extensions().size()); 
Assertions.assertEquals(original.instanceId(), fetched.instanceId()); Assertions.assertEquals(original.isLatestScaleSetUpdateApplied(), fetched.isLatestScaleSetUpdateApplied()); Assertions.assertEquals(original.isLinuxPasswordAuthenticationEnabled(), fetched.isLinuxPasswordAuthenticationEnabled()); Assertions.assertEquals(original.isManagedDiskEnabled(), fetched.isManagedDiskEnabled()); Assertions.assertEquals(original.isOSBasedOnCustomImage(), fetched.isOSBasedOnCustomImage()); Assertions.assertEquals(original.isOSBasedOnPlatformImage(), fetched.isOSBasedOnPlatformImage()); Assertions.assertEquals(original.isOSBasedOnStoredImage(), fetched.isOSBasedOnStoredImage()); Assertions.assertEquals(original.isWindowsAutoUpdateEnabled(), fetched.isWindowsAutoUpdateEnabled()); Assertions.assertEquals(original.isWindowsVMAgentProvisioned(), original.isWindowsVMAgentProvisioned()); Assertions.assertEquals(original.networkInterfaceIds().size(), fetched.networkInterfaceIds().size()); Assertions.assertEquals(original.osDiskCachingType(), fetched.osDiskCachingType()); Assertions.assertEquals(original.osDiskId(), fetched.osDiskId()); Assertions.assertEquals(original.osDiskName(), fetched.osDiskName()); Assertions.assertEquals(original.osDiskSizeInGB(), fetched.osDiskSizeInGB()); Assertions.assertEquals(original.osType(), fetched.osType()); Assertions.assertEquals(original.osUnmanagedDiskVhdUri(), fetched.osUnmanagedDiskVhdUri()); Assertions.assertEquals(original.powerState(), fetched.powerState()); Assertions.assertEquals(original.primaryNetworkInterfaceId(), fetched.primaryNetworkInterfaceId()); Assertions.assertEquals(original.size(), fetched.size()); Assertions.assertEquals(original.sku().name(), fetched.sku().name()); Assertions.assertEquals(original.storedImageUnmanagedVhdUri(), fetched.storedImageUnmanagedVhdUri()); Assertions.assertEquals(original.unmanagedDataDisks().size(), fetched.unmanagedDataDisks().size()); Assertions.assertEquals(original.windowsTimeZone(), fetched.windowsTimeZone()); } @Test public void testVirtualMachineScaleSetSkuTypes() { rgName = null; VirtualMachineScaleSetSkuTypes skuType = VirtualMachineScaleSetSkuTypes.STANDARD_A0; Assertions.assertNull(skuType.sku().capacity()); Sku sku1 = skuType.sku(); Assertions.assertNull(sku1.capacity()); sku1.withCapacity(1L); Assertions.assertEquals(sku1.capacity().longValue(), 1); Assertions.assertNull(skuType.sku().capacity()); Sku sku2 = skuType.sku(); Assertions.assertNull(sku2.capacity()); sku2.withCapacity(2L); Assertions.assertEquals(sku2.capacity().longValue(), 2); Assertions.assertNull(skuType.sku().capacity()); Assertions.assertEquals(sku1.capacity().longValue(), 1); } @Test public void canDeleteVMSSInstance() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(4) .create(); Assertions.assertEquals(4, vmss.virtualMachines().list().stream().count()); List<String> firstTwoIds = vmss.virtualMachines().list().stream() .limit(2) .map(VirtualMachineScaleSetVM::instanceId) .collect(Collectors.toList()); vmss.virtualMachines().deleteInstances(firstTwoIds, true); Assertions.assertEquals(2, vmss.virtualMachines().list().stream().count()); vmss.virtualMachines().deleteInstances(Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); Assertions.assertEquals(1, vmss.virtualMachines().list().stream().count()); computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); } }
For LLC-style client methods, `RequestOptions` already carries the `Context` within it. Hence, under the new guideline, we would require `RequestOptions` instead of a separate `Context` parameter. E.g.
```java
@Generated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BinaryData> regenerateAccessKeyWithResponse(BinaryData keyOptions, RequestOptions requestOptions) {
    return this.serviceClient.regenerateAccessKeyWithResponse(keyOptions, requestOptions);
}
```
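As a rough illustration (my own addition, not part of the original comment), here is a minimal sketch of the caller side once `Context` travels inside `RequestOptions`; it assumes azure-core's `RequestOptions#setContext` and `addRequestCallback`, and deliberately builds only the options object rather than any particular service client:

```java
import com.azure.core.http.rest.RequestOptions;
import com.azure.core.util.Context;

public class RequestOptionsContextSample {
    public static void main(String[] args) {
        // The per-call Context rides inside RequestOptions instead of being a
        // separate Context parameter on the generated service method.
        RequestOptions options = new RequestOptions()
            .setContext(new Context("caller-id", "sample"))
            .addRequestCallback(request -> request.setHeader("x-ms-sample", "true"));

        // The service method only needs RequestOptions; the pipeline can pull
        // the per-call Context back out via options.getContext().
        System.out.println(options.getContext().getData("caller-id").orElse("missing"));
    }
}
```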
private void checkContextInRightPlace(DetailAST methodDefToken) { final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); final String returnType = getReturnType(methodDefToken.findFirstToken(TokenTypes.TYPE), new StringBuilder()).toString(); final boolean containsContextParameter = TokenUtil.findFirstTokenByPredicate(parametersToken, parameterToken -> { if (parameterToken.getType() != TokenTypes.PARAMETER_DEF) { return false; } final DetailAST paramTypeIdentToken = parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT); return paramTypeIdentToken != null && CONTEXT.equals(paramTypeIdentToken.getText()); }) .isPresent(); final boolean containsRequestOptionsParameter = TokenUtil.findFirstTokenByPredicate(parametersToken, parameterToken -> { if (parameterToken.getType() != TokenTypes.PARAMETER_DEF) { return false; } final DetailAST paramTypeIdentToken = parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT); return paramTypeIdentToken != null && REQUEST_OPTIONS.equals(paramTypeIdentToken.getText()); }) .isPresent(); if (containsContextParameter) { if (returnType.startsWith(MONO_BRACKET) || returnType.startsWith(PAGED_FLUX_BRACKET) || returnType.startsWith(POLLER_FLUX_BRACKET)) { log(methodDefToken, String.format(ASYNC_CONTEXT_ERROR, CONTEXT)); } } else { if (!containsRequestOptionsParameter) { if (returnType.startsWith(RESPONSE_BRACKET)) { log(methodDefToken, String.format(SYNC_CONTEXT_ERROR, CONTEXT, REQUEST_OPTIONS)); } } } }
}
private void checkContextInRightPlace(DetailAST methodDefToken) { final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); final String returnType = getReturnType(methodDefToken.findFirstToken(TokenTypes.TYPE), new StringBuilder()).toString(); final boolean containsContextParameter = TokenUtil.findFirstTokenByPredicate(parametersToken, parameterToken -> { if (parameterToken.getType() != TokenTypes.PARAMETER_DEF) { return false; } final DetailAST paramTypeIdentToken = parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT); return paramTypeIdentToken != null && CONTEXT.equals(paramTypeIdentToken.getText()); }) .isPresent(); final boolean containsRequestOptionsParameter = TokenUtil.findFirstTokenByPredicate(parametersToken, parameterToken -> { if (parameterToken.getType() != TokenTypes.PARAMETER_DEF) { return false; } final DetailAST paramTypeIdentToken = parameterToken.findFirstToken(TokenTypes.TYPE).findFirstToken(TokenTypes.IDENT); return paramTypeIdentToken != null && REQUEST_OPTIONS.equals(paramTypeIdentToken.getText()); }) .isPresent(); if (containsContextParameter) { if (returnType.startsWith(MONO_BRACKET) || returnType.startsWith(PAGED_FLUX_BRACKET) || returnType.startsWith(POLLER_FLUX_BRACKET)) { log(methodDefToken, String.format(ASYNC_CONTEXT_ERROR, CONTEXT)); } } else { if (!containsRequestOptionsParameter) { if (returnType.startsWith(RESPONSE_BRACKET)) { log(methodDefToken, String.format(SYNC_CONTEXT_ERROR, CONTEXT, REQUEST_OPTIONS)); } } } }
class annotated with @ServiceClient should * follow below rules: * 1) Methods should not have "Async" added to the method name. * 2) The return type of async and sync clients should be as per guidelines: * 2.1) The return type for async collection should be of type? extends PagedFlux. * 2.2) The return type for async single value should be of type? extends Mono. * 2.3) The return type for sync collection should be of type? extends PagedIterable. * 2.4) The return type for sync single value should be of type? extends Response. * 3) Naming pattern for 'WithResponse'. * 4) Synchronous method with annotation @ServiceMethod has to have {@code Context}
class annotated with @ServiceClient should * follow below rules: * 1) Methods should not have "Async" added to the method name. * 2) The return type of async and sync clients should be as per guidelines: * 2.1) The return type for async collection should be of type? extends PagedFlux. * 2.2) The return type for async single value should be of type? extends Mono. * 2.3) The return type for sync collection should be of type? extends PagedIterable. * 2.4) The return type for sync single value should be of type? extends Response. * 3) Naming pattern for 'WithResponse'. * 4) Synchronous method with annotation @ServiceMethod has to have {@code Context}
Does the new method `LocationCache#getRegionName(URI locationEndpoint, operationType)`, which relies on the internal state of the LocationCache, guarantee that it always returns the correct region name in every scenario? The LocationCache state can evolve over time; doesn't that cause any problem for the correctness of the returned region? I would like you to think about any edge cases :-)
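To make one such edge case concrete (this is my own simplified, hypothetical sketch, not the SDK's actual types): if the endpoint being looked up was resolved against an earlier `locationInfo` snapshot and `updateLocationCache` has since swapped in a new snapshot that no longer contains it, both map lookups miss and the method silently falls back to the first key of `availableWriteEndpointByLocation`, which can name a different region than the one that actually served the request (and is a write-side map even for reads). If that map were still empty, the fallback would instead throw `NoSuchElementException`.

```java
import java.net.URI;
import java.util.LinkedHashMap;
import java.util.Map;

// Simplified, hypothetical model of the lookup in getRegionName; it only shows
// why a stale endpoint can fall through to an arbitrary "first" write region.
public class StaleRegionLookupSketch {
    static volatile Map<URI, String> regionNameByWriteEndpoint = new LinkedHashMap<>();
    static volatile Map<String, URI> availableWriteEndpointByLocation = new LinkedHashMap<>();

    static String getRegionName(URI endpoint) {
        String region = regionNameByWriteEndpoint.get(endpoint);
        if (region != null) {
            return region;
        }
        // Fallback mirrors "first key of availableWriteEndpointByLocation":
        // correct only by coincidence once the endpoint is no longer in the map.
        return availableWriteEndpointByLocation.keySet().iterator().next();
    }

    public static void main(String[] args) {
        URI eastUs = URI.create("https://account-eastus.documents.example.com");
        regionNameByWriteEndpoint.put(eastUs, "eastus");
        availableWriteEndpointByLocation.put("eastus", eastUs);

        // A request resolves its endpoint against this snapshot...
        URI resolved = eastUs;

        // ...then a cache refresh swaps in a snapshot where eastus was removed.
        regionNameByWriteEndpoint = new LinkedHashMap<>();
        Map<String, URI> next = new LinkedHashMap<>();
        next.put("westus", URI.create("https://account-westus.documents.example.com"));
        availableWriteEndpointByLocation = next;

        // The lookup now misses and reports "westus" for a request sent to eastus.
        System.out.println(getRegionName(resolved));
    }
}
```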
public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) { Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>(); if (operationType.isWriteOperation()) { if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) { return regionName.v; } } else { if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) { return regionName.v; } } return this.locationInfo.availableWriteEndpointByLocation.keySet().iterator().next(); }
if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) {
public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) { Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>(); if (operationType.isWriteOperation()) { if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) { return regionName.v; } } else { if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) { return regionName.v; } } return this.locationInfo.availableWriteEndpointByLocation.keySet().iterator().next(); }
class LocationCache { private final static Logger logger = LoggerFactory.getLogger(LocationCache.class); private final boolean enableEndpointDiscovery; private final URI defaultEndpoint; private final boolean useMultipleWriteLocations; private final Object lockObject; private final Duration unavailableLocationsExpirationTime; private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint; private DatabaseAccountLocationsInfo locationInfo; private Instant lastCacheUpdateTimestamp; private boolean enableMultipleWriteLocations; public LocationCache( List<String> preferredLocations, URI defaultEndpoint, boolean enableEndpointDiscovery, boolean useMultipleWriteLocations, Configs configs) { this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint); this.defaultEndpoint = defaultEndpoint; this.enableEndpointDiscovery = enableEndpointDiscovery; this.useMultipleWriteLocations = useMultipleWriteLocations; this.lockObject = new Object(); this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>(); this.lastCacheUpdateTimestamp = Instant.MIN; this.enableMultipleWriteLocations = false; this.unavailableLocationsExpirationTime = Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds()); } /** * Gets list of read endpoints ordered by * * 1. Preferred location * 2. Endpoint availability * @return */ public UnmodifiableList<URI> getReadEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.readEndpoints; } /** * Gets list of write endpoints ordered by * 1. Preferred location * 2. Endpoint availability * @return */ public UnmodifiableList<URI> getWriteEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.writeEndpoints; } /** * Marks the current location unavailable for read */ public void markEndpointUnavailableForRead(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Read); } /** * Marks the current location unavailable for write */ public void markEndpointUnavailableForWrite(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Write); } /** * Invoked when {@link DatabaseAccount} is read * @param databaseAccount READ DatabaseAccount */ public void onDatabaseAccountRead(DatabaseAccount databaseAccount) { this.updateLocationCache( databaseAccount.getWritableLocations(), databaseAccount.getReadableLocations(), null, BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); } void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) { this.updateLocationCache( null, null , preferredLocations, null); } /** * Resolves request to service endpoint. * 1. If this is a write request * (a) If UseMultipleWriteLocations = true * (i) For document writes, resolve to most preferred and available write endpoint. * Once the endpoint is marked unavailable, it is moved to the end of available write endpoint. Current request will * be retried on next preferred available write endpoint. * (ii) For all other resources, always resolve to first/second (regardless of preferred locations) * write getEndpoint in {@link DatabaseAccount * Endpoint of first write location in {@link DatabaseAccount * write operation on all resource types (except during that region's failover). 
* Only during manual failover, client would retry write on second write location in {@link DatabaseAccount * (b) Else resolve the request to first write getEndpoint in {@link DatabaseAccount * second write getEndpoint in {@link DatabaseAccount * 2. Else resolve the request to most preferred available read getEndpoint (getAutomatic failover for read requests) * @param request Request for which getEndpoint is to be resolved * @return Resolved getEndpoint */ public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null."); if(request.requestContext.locationEndpointToRoute != null) { return request.requestContext.locationEndpointToRoute; } int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0); boolean usePreferredLocations = request.requestContext.usePreferredLocations != null ? request.requestContext.usePreferredLocations : true; if(!usePreferredLocations || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) { DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if(this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) { locationIndex = Math.min(locationIndex%2, currentLocationInfo.availableWriteLocations.size()-1); String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex); return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation); } else { return this.defaultEndpoint; } } else { UnmodifiableList<URI> endpoints = request.getOperationType().isWriteOperation()? this.getWriteEndpoints() : this.getReadEndpoints(); return endpoints.get(locationIndex % endpoints.size()); } } public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) { canRefreshInBackground.v = true; DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations); if (this.enableEndpointDiscovery) { boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations; List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints; if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) { canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints,OperationType.Read); logger.debug("shouldRefreshEndpoints = true, since the first read endpoint " + "[{}] is not available for read. 
canRefreshInBackground = [{}]", readLocationEndpoints.get(0), canRefreshInBackground.v); return true; } if (!Strings.isNullOrEmpty(mostPreferredLocation)) { Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>(); logger.debug("getReadEndpoints [{}]", readLocationEndpoints); if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, mostPreferredLocation, mostPreferredReadEndpointHolder)) { logger.debug("most preferred is [{}], most preferred available is [{}]", mostPreferredLocation, mostPreferredReadEndpointHolder.v); if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) { logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" + " is not available for read.", mostPreferredLocation); return true; } logger.debug("most preferred is [{}], and most preferred available [{}] are the same", mostPreferredLocation, mostPreferredReadEndpointHolder.v); } else { logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " + "is not in available read locations.", mostPreferredLocation); return true; } } Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>(); List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints; logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints); if (!this.canUseMultipleWriteLocations()) { if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) { canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints,OperationType.Write); logger.debug("shouldRefreshEndpoints = true, most preferred location " + "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]", mostPreferredLocation, writeLocationEndpoints.get(0), canRefreshInBackground.v); return true; } else { logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write", writeLocationEndpoints.get(0)); return shouldRefresh; } } else if (!Strings.isNullOrEmpty(mostPreferredLocation)) { if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, mostPreferredLocation, mostPreferredWriteEndpointHolder)) { shouldRefresh = ! 
areEqual(mostPreferredWriteEndpointHolder.v,writeLocationEndpoints.get(0)); if (shouldRefresh) { logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]", writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v); } else { logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]", writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v); } return shouldRefresh; } else { logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations", mostPreferredLocation); return true; } } else { logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty", mostPreferredLocation); return shouldRefresh; } } else { logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled"); return false; } } private boolean areEqual(URI url1, URI url2) { return url1.equals(url2); } private void clearStaleEndpointUnavailabilityInfo() { if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) { List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet()); for (URI unavailableEndpoint: unavailableEndpoints) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>(); if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder) && durationPassed(Instant.now(), unavailabilityInfoHolder.v.LastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime) && Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) { logger.debug( "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints", unavailableEndpoint, unavailabilityInfoHolder.v.UnavailableOperations); } } } } private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); if (expectedAvailableOperations == OperationType.None || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder) || !unavailabilityInfoHolder.v.UnavailableOperations.supports(expectedAvailableOperations)) { return false; } else { if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.LastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) { return false; } else { logger.debug( "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints", endpoint, unavailabilityInfoHolder.v.UnavailableOperations); return true; } } } private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); boolean anyEndpointsAvailable = false; for (URI endpoint : endpoints) { if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) { anyEndpointsAvailable = true; break; } } return anyEndpointsAvailable; } private void markEndpointUnavailable( URI unavailableEndpoint, OperationType unavailableOperationType) { Instant currentTime = Instant.now(); LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute( unavailableEndpoint, new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() { @Override public LocationUnavailabilityInfo apply(URI 
url, LocationUnavailabilityInfo info) { if (info == null) { return new LocationUnavailabilityInfo(currentTime, unavailableOperationType); } else { info.LastUnavailabilityCheckTimeStamp = currentTime; info.UnavailableOperations = OperationType.combine(info.UnavailableOperations, unavailableOperationType); return info; } } }); this.updateLocationCache(); logger.debug( "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]", unavailableEndpoint, unavailableOperationType, updatedInfo.LastUnavailabilityCheckTimeStamp); } private void updateLocationCache(){ updateLocationCache(null, null, null, null); } private void updateLocationCache( Iterable<DatabaseAccountLocation> writeLocations, Iterable<DatabaseAccountLocation> readLocations, UnmodifiableList<String> preferenceList, Boolean enableMultipleWriteLocations) { synchronized (this.lockObject) { DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo); logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); if (preferenceList != null) { nextLocationInfo.preferredLocations = preferenceList; } if (enableMultipleWriteLocations != null) { this.enableMultipleWriteLocations = enableMultipleWriteLocations; } this.clearStaleEndpointUnavailabilityInfo(); if (readLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations); Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint); nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap); nextLocationInfo.availableReadLocations = out.v; nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v; } if (writeLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations); Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint); nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap); nextLocationInfo.availableWriteLocations = out.v; nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v; } nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint); nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0)); this.lastCacheUpdateTimestamp = Instant.now(); logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); this.locationInfo = nextLocationInfo; } } private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation, UnmodifiableList<String> orderedLocations, OperationType expectedAvailableOperation, URI fallbackEndpoint) { List<URI> endpoints = new ArrayList<>(); DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if (this.enableEndpointDiscovery) { if (this.canUseMultipleWriteLocations() || 
expectedAvailableOperation.supports(OperationType.Read)) { List<URI> unavailableEndpoints = new ArrayList<>(); for (String location: currentLocationInfo.preferredLocations) { Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>(); if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { unavailableEndpoints.add(endpoint.v); } else { endpoints.add(endpoint.v); } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } endpoints.addAll(unavailableEndpoints); } else { for (String location : orderedLocations) { Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null); if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) { endpoints.add(endpoint.v); } } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } return new UnmodifiableList<URI>(endpoints); } private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations, Utils.ValueHolder<UnmodifiableList<String>> orderedLocations, Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) { Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>(); Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>(); List<String> parsedLocations = new ArrayList<>(); for (DatabaseAccountLocation location: locations) { if (!Strings.isNullOrEmpty(location.getName())) { try { URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT)); endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint); regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT)); parsedLocations.add(location.getName()); } catch (Exception e) { logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]", location.getName(), location.getEndpoint()); } } } orderedLocations.v = new UnmodifiableList<String>(parsedLocations); regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint); return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation); } private boolean canUseMultipleWriteLocations() { return this.useMultipleWriteLocations && this.enableMultipleWriteLocations; } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.canUseMultipleWriteLocations() && (request.getResourceType() == ResourceType.Document || (request.getResourceType() == ResourceType.StoredProcedure && request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript)); } private static class LocationUnavailabilityInfo { LocationUnavailabilityInfo(Instant instant, OperationType type) { this.LastUnavailabilityCheckTimeStamp = instant; this.UnavailableOperations = type; } public Instant LastUnavailabilityCheckTimeStamp; public OperationType UnavailableOperations; } private enum OperationType { None(0x0), Read(0x1), Write(0x2), ReadAndWrite(0x3); private final int flag; public boolean hasReadFlag() { return (flag & Read.flag) != 0; } public boolean hasWriteFlag() { return (flag & Write.flag) != 0; } public static OperationType combine(OperationType t1, OperationType t2) { switch (t1.flag | t2.flag) { case 0x0: return None; case 0x1: return Read; case 0x2: return Write; default: return ReadAndWrite; } } public boolean supports(OperationType type) { return (flag & type.flag) != 0; } OperationType(int flag) { this.flag = 
flag; } } private boolean durationPassed(Instant end, Instant start, Duration duration) { return end.minus(duration).isAfter(start); } private boolean unavailableLocationsExpirationTimePassed() { return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime); } static class DatabaseAccountLocationsInfo { private UnmodifiableList<String> preferredLocations; private UnmodifiableList<String> availableWriteLocations; private UnmodifiableList<String> availableReadLocations; private UnmodifiableMap<String, URI> availableWriteEndpointByLocation; private UnmodifiableMap<String, URI> availableReadEndpointByLocation; private UnmodifiableMap<URI, String> regionNameByWriteEndpoint; private UnmodifiableMap<URI, String> regionNameByReadEndpoint; private UnmodifiableList<URI> writeEndpoints; private UnmodifiableList<URI> readEndpoints; public DatabaseAccountLocationsInfo(List<String> preferredLocations, URI defaultEndpoint) { this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList())); this.availableWriteEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByWriteEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByReadEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList()); this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList()); this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); } public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) { this.preferredLocations = other.preferredLocations; this.availableWriteLocations = other.availableWriteLocations; this.availableReadLocations = other.availableReadLocations; this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation; this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint; this.regionNameByReadEndpoint = other.regionNameByReadEndpoint; this.availableReadEndpointByLocation = other.availableReadEndpointByLocation; this.writeEndpoints = other.writeEndpoints; this.readEndpoints = other.readEndpoints; } } }
class LocationCache { private final static Logger logger = LoggerFactory.getLogger(LocationCache.class); private final boolean enableEndpointDiscovery; private final URI defaultEndpoint; private final boolean useMultipleWriteLocations; private final Object lockObject; private final Duration unavailableLocationsExpirationTime; private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint; private DatabaseAccountLocationsInfo locationInfo; private Instant lastCacheUpdateTimestamp; private boolean enableMultipleWriteLocations; public LocationCache( List<String> preferredLocations, URI defaultEndpoint, boolean enableEndpointDiscovery, boolean useMultipleWriteLocations, Configs configs) { this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint); this.defaultEndpoint = defaultEndpoint; this.enableEndpointDiscovery = enableEndpointDiscovery; this.useMultipleWriteLocations = useMultipleWriteLocations; this.lockObject = new Object(); this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>(); this.lastCacheUpdateTimestamp = Instant.MIN; this.enableMultipleWriteLocations = false; this.unavailableLocationsExpirationTime = Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds()); } /** * Gets list of read endpoints ordered by * * 1. Preferred location * 2. Endpoint availability * @return */ public UnmodifiableList<URI> getReadEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.readEndpoints; } /** * Gets list of write endpoints ordered by * 1. Preferred location * 2. Endpoint availability * @return */ public UnmodifiableList<URI> getWriteEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.writeEndpoints; } /** * Marks the current location unavailable for read */ public void markEndpointUnavailableForRead(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Read); } /** * Marks the current location unavailable for write */ public void markEndpointUnavailableForWrite(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Write); } /** * Invoked when {@link DatabaseAccount} is read * @param databaseAccount READ DatabaseAccount */ public void onDatabaseAccountRead(DatabaseAccount databaseAccount) { this.updateLocationCache( databaseAccount.getWritableLocations(), databaseAccount.getReadableLocations(), null, BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); } void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) { this.updateLocationCache( null, null , preferredLocations, null); } /** * Resolves request to service endpoint. * 1. If this is a write request * (a) If UseMultipleWriteLocations = true * (i) For document writes, resolve to most preferred and available write endpoint. * Once the endpoint is marked unavailable, it is moved to the end of available write endpoint. Current request will * be retried on next preferred available write endpoint. * (ii) For all other resources, always resolve to first/second (regardless of preferred locations) * write getEndpoint in {@link DatabaseAccount * Endpoint of first write location in {@link DatabaseAccount * write operation on all resource types (except during that region's failover). 
* Only during manual failover, client would retry write on second write location in {@link DatabaseAccount * (b) Else resolve the request to first write getEndpoint in {@link DatabaseAccount * second write getEndpoint in {@link DatabaseAccount * 2. Else resolve the request to most preferred available read getEndpoint (getAutomatic failover for read requests) * @param request Request for which getEndpoint is to be resolved * @return Resolved getEndpoint */ public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null."); if(request.requestContext.locationEndpointToRoute != null) { return request.requestContext.locationEndpointToRoute; } int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0); boolean usePreferredLocations = request.requestContext.usePreferredLocations != null ? request.requestContext.usePreferredLocations : true; if(!usePreferredLocations || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) { DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if(this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) { locationIndex = Math.min(locationIndex%2, currentLocationInfo.availableWriteLocations.size()-1); String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex); return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation); } else { return this.defaultEndpoint; } } else { UnmodifiableList<URI> endpoints = request.getOperationType().isWriteOperation()? this.getWriteEndpoints() : this.getReadEndpoints(); return endpoints.get(locationIndex % endpoints.size()); } } public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) { canRefreshInBackground.v = true; DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations); if (this.enableEndpointDiscovery) { boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations; List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints; if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) { canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints,OperationType.Read); logger.debug("shouldRefreshEndpoints = true, since the first read endpoint " + "[{}] is not available for read. 
canRefreshInBackground = [{}]", readLocationEndpoints.get(0), canRefreshInBackground.v); return true; } if (!Strings.isNullOrEmpty(mostPreferredLocation)) { Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>(); logger.debug("getReadEndpoints [{}]", readLocationEndpoints); if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, mostPreferredLocation, mostPreferredReadEndpointHolder)) { logger.debug("most preferred is [{}], most preferred available is [{}]", mostPreferredLocation, mostPreferredReadEndpointHolder.v); if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) { logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" + " is not available for read.", mostPreferredLocation); return true; } logger.debug("most preferred is [{}], and most preferred available [{}] are the same", mostPreferredLocation, mostPreferredReadEndpointHolder.v); } else { logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " + "is not in available read locations.", mostPreferredLocation); return true; } } Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>(); List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints; logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints); if (!this.canUseMultipleWriteLocations()) { if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) { canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints,OperationType.Write); logger.debug("shouldRefreshEndpoints = true, most preferred location " + "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]", mostPreferredLocation, writeLocationEndpoints.get(0), canRefreshInBackground.v); return true; } else { logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write", writeLocationEndpoints.get(0)); return shouldRefresh; } } else if (!Strings.isNullOrEmpty(mostPreferredLocation)) { if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, mostPreferredLocation, mostPreferredWriteEndpointHolder)) { shouldRefresh = ! 
areEqual(mostPreferredWriteEndpointHolder.v,writeLocationEndpoints.get(0)); if (shouldRefresh) { logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]", writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v); } else { logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]", writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v); } return shouldRefresh; } else { logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations", mostPreferredLocation); return true; } } else { logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty", mostPreferredLocation); return shouldRefresh; } } else { logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled"); return false; } } private boolean areEqual(URI url1, URI url2) { return url1.equals(url2); } private void clearStaleEndpointUnavailabilityInfo() { if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) { List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet()); for (URI unavailableEndpoint: unavailableEndpoints) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>(); if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder) && durationPassed(Instant.now(), unavailabilityInfoHolder.v.LastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime) && Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) { logger.debug( "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints", unavailableEndpoint, unavailabilityInfoHolder.v.UnavailableOperations); } } } } private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); if (expectedAvailableOperations == OperationType.None || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder) || !unavailabilityInfoHolder.v.UnavailableOperations.supports(expectedAvailableOperations)) { return false; } else { if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.LastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) { return false; } else { logger.debug( "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints", endpoint, unavailabilityInfoHolder.v.UnavailableOperations); return true; } } } private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); boolean anyEndpointsAvailable = false; for (URI endpoint : endpoints) { if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) { anyEndpointsAvailable = true; break; } } return anyEndpointsAvailable; } private void markEndpointUnavailable( URI unavailableEndpoint, OperationType unavailableOperationType) { Instant currentTime = Instant.now(); LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute( unavailableEndpoint, new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() { @Override public LocationUnavailabilityInfo apply(URI 
url, LocationUnavailabilityInfo info) { if (info == null) { return new LocationUnavailabilityInfo(currentTime, unavailableOperationType); } else { info.LastUnavailabilityCheckTimeStamp = currentTime; info.UnavailableOperations = OperationType.combine(info.UnavailableOperations, unavailableOperationType); return info; } } }); this.updateLocationCache(); logger.debug( "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]", unavailableEndpoint, unavailableOperationType, updatedInfo.LastUnavailabilityCheckTimeStamp); } private void updateLocationCache(){ updateLocationCache(null, null, null, null); } private void updateLocationCache( Iterable<DatabaseAccountLocation> writeLocations, Iterable<DatabaseAccountLocation> readLocations, UnmodifiableList<String> preferenceList, Boolean enableMultipleWriteLocations) { synchronized (this.lockObject) { DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo); logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); if (preferenceList != null) { nextLocationInfo.preferredLocations = preferenceList; } if (enableMultipleWriteLocations != null) { this.enableMultipleWriteLocations = enableMultipleWriteLocations; } this.clearStaleEndpointUnavailabilityInfo(); if (readLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations); Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint); nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap); nextLocationInfo.availableReadLocations = out.v; nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v; } if (writeLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations); Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint); nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap); nextLocationInfo.availableWriteLocations = out.v; nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v; } nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint); nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0)); this.lastCacheUpdateTimestamp = Instant.now(); logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); this.locationInfo = nextLocationInfo; } } private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation, UnmodifiableList<String> orderedLocations, OperationType expectedAvailableOperation, URI fallbackEndpoint) { List<URI> endpoints = new ArrayList<>(); DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if (this.enableEndpointDiscovery) { if (this.canUseMultipleWriteLocations() || 
expectedAvailableOperation.supports(OperationType.Read)) { List<URI> unavailableEndpoints = new ArrayList<>(); for (String location: currentLocationInfo.preferredLocations) { Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>(); if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { unavailableEndpoints.add(endpoint.v); } else { endpoints.add(endpoint.v); } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } endpoints.addAll(unavailableEndpoints); } else { for (String location : orderedLocations) { Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null); if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) { endpoints.add(endpoint.v); } } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } return new UnmodifiableList<URI>(endpoints); } private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations, Utils.ValueHolder<UnmodifiableList<String>> orderedLocations, Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) { Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>(); Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>(); List<String> parsedLocations = new ArrayList<>(); for (DatabaseAccountLocation location: locations) { if (!Strings.isNullOrEmpty(location.getName())) { try { URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT)); endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint); regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT)); parsedLocations.add(location.getName()); } catch (Exception e) { logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]", location.getName(), location.getEndpoint()); } } } orderedLocations.v = new UnmodifiableList<String>(parsedLocations); regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint); return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation); } private boolean canUseMultipleWriteLocations() { return this.useMultipleWriteLocations && this.enableMultipleWriteLocations; } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.canUseMultipleWriteLocations() && (request.getResourceType() == ResourceType.Document || (request.getResourceType() == ResourceType.StoredProcedure && request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript)); } private static class LocationUnavailabilityInfo { LocationUnavailabilityInfo(Instant instant, OperationType type) { this.LastUnavailabilityCheckTimeStamp = instant; this.UnavailableOperations = type; } public Instant LastUnavailabilityCheckTimeStamp; public OperationType UnavailableOperations; } private enum OperationType { None(0x0), Read(0x1), Write(0x2), ReadAndWrite(0x3); private final int flag; public boolean hasReadFlag() { return (flag & Read.flag) != 0; } public boolean hasWriteFlag() { return (flag & Write.flag) != 0; } public static OperationType combine(OperationType t1, OperationType t2) { switch (t1.flag | t2.flag) { case 0x0: return None; case 0x1: return Read; case 0x2: return Write; default: return ReadAndWrite; } } public boolean supports(OperationType type) { return (flag & type.flag) != 0; } OperationType(int flag) { this.flag = 
flag; } } private boolean durationPassed(Instant end, Instant start, Duration duration) { return end.minus(duration).isAfter(start); } private boolean unavailableLocationsExpirationTimePassed() { return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime); } static class DatabaseAccountLocationsInfo { private UnmodifiableList<String> preferredLocations; private UnmodifiableList<String> availableWriteLocations; private UnmodifiableList<String> availableReadLocations; private UnmodifiableMap<String, URI> availableWriteEndpointByLocation; private UnmodifiableMap<String, URI> availableReadEndpointByLocation; private UnmodifiableMap<URI, String> regionNameByWriteEndpoint; private UnmodifiableMap<URI, String> regionNameByReadEndpoint; private UnmodifiableList<URI> writeEndpoints; private UnmodifiableList<URI> readEndpoints; public DatabaseAccountLocationsInfo(List<String> preferredLocations, URI defaultEndpoint) { this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList())); this.availableWriteEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByWriteEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByReadEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList()); this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList()); this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); } public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) { this.preferredLocations = other.preferredLocations; this.availableWriteLocations = other.availableWriteLocations; this.availableReadLocations = other.availableReadLocations; this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation; this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint; this.regionNameByReadEndpoint = other.regionNameByReadEndpoint; this.availableReadEndpointByLocation = other.availableReadEndpointByLocation; this.writeEndpoints = other.writeEndpoints; this.readEndpoints = other.readEndpoints; } } }
Changes to this file can be reverted.
private static String privilegedGetProperty(String theProp, String defaultVal) { return java.security.AccessController.doPrivileged( (PrivilegedAction<String>) () -> { String value = System.getProperty(theProp, ""); return (value.isEmpty()) ? defaultVal : value; }); }
return java.security.AccessController.doPrivileged(
private static String privilegedGetProperty(String theProp, String defaultVal) { return java.security.AccessController.doPrivileged( (PrivilegedAction<String>) () -> { String value = System.getProperty(theProp, ""); return (value.isEmpty()) ? defaultVal : value; }); }
class JREKeyStoreFactory { private static final String JAVA_HOME = privilegedGetProperty("java.home", ""); private static final Path STORE_PATH = Paths.get(JAVA_HOME).resolve("lib").resolve("security"); private static final Path DEFAULT_STORE = STORE_PATH.resolve("cacerts"); private static final Path JSSE_DEFAULT_STORE = STORE_PATH.resolve("jssecacerts"); private static final String KEY_STORE_PASSWORD = privilegedGetProperty("javax.net.ssl.keyStorePassword", "changeit"); private static final Logger LOGGER = Logger.getLogger(JREKeyStoreFactory.class.getName()); private static final KeyStore JRE_KEY_STORE = getJreKeyStore(); private JREKeyStoreFactory() { } /** * This method returns the instance of JRE key store * @return the JRE key store. */ public static KeyStore getDefaultKeyStore() { return JRE_KEY_STORE; } private static KeyStore getJreKeyStore() { KeyStore defaultKeyStore = null; try { defaultKeyStore = KeyStore.getInstance(KeyStore.getDefaultType()); loadKeyStore(defaultKeyStore); } catch (KeyStoreException e) { LOGGER.log(WARNING, "Unable to get the jre key store.", e); } return defaultKeyStore; } private static void loadKeyStore(KeyStore ks) { try (InputStream inputStream = Files.newInputStream(getKeyStoreFile())) { ks.load(inputStream, KEY_STORE_PASSWORD.toCharArray()); } catch (IOException | NoSuchAlgorithmException | CertificateException e) { LOGGER.log(WARNING, "unable to load the jre key store", e); } } private static Path getKeyStoreFile() { return Stream.of(getConfiguredKeyStorePath(), JSSE_DEFAULT_STORE, DEFAULT_STORE) .filter(Objects::nonNull) .filter(Files::exists) .filter(Files::isReadable) .findFirst() .orElse(null); } private static Path getConfiguredKeyStorePath() { String configuredKeyStorePath = privilegedGetProperty("javax.net.ssl.keyStore", ""); return Optional.of(configuredKeyStorePath) .filter(path -> !path.isEmpty()) .map(Paths::get) .orElse(null); } @SuppressWarnings("removal") }
class JREKeyStoreFactory { private static final String JAVA_HOME = privilegedGetProperty("java.home", ""); private static final Path STORE_PATH = Paths.get(JAVA_HOME).resolve("lib").resolve("security"); private static final Path DEFAULT_STORE = STORE_PATH.resolve("cacerts"); private static final Path JSSE_DEFAULT_STORE = STORE_PATH.resolve("jssecacerts"); private static final String KEY_STORE_PASSWORD = privilegedGetProperty("javax.net.ssl.keyStorePassword", "changeit"); private static final Logger LOGGER = Logger.getLogger(JREKeyStoreFactory.class.getName()); private static final KeyStore JRE_KEY_STORE = getJreKeyStore(); private JREKeyStoreFactory() { } /** * This method returns the instance of JRE key store * @return the JRE key store. */ public static KeyStore getDefaultKeyStore() { return JRE_KEY_STORE; } private static KeyStore getJreKeyStore() { KeyStore defaultKeyStore = null; try { defaultKeyStore = KeyStore.getInstance(KeyStore.getDefaultType()); loadKeyStore(defaultKeyStore); } catch (KeyStoreException e) { LOGGER.log(WARNING, "Unable to get the jre key store.", e); } return defaultKeyStore; } private static void loadKeyStore(KeyStore ks) { try (InputStream inputStream = Files.newInputStream(getKeyStoreFile())) { ks.load(inputStream, KEY_STORE_PASSWORD.toCharArray()); } catch (IOException | NoSuchAlgorithmException | CertificateException e) { LOGGER.log(WARNING, "unable to load the jre key store", e); } } private static Path getKeyStoreFile() { return Stream.of(getConfiguredKeyStorePath(), JSSE_DEFAULT_STORE, DEFAULT_STORE) .filter(Objects::nonNull) .filter(Files::exists) .filter(Files::isReadable) .findFirst() .orElse(null); } private static Path getConfiguredKeyStorePath() { String configuredKeyStorePath = privilegedGetProperty("javax.net.ssl.keyStore", ""); return Optional.of(configuredKeyStorePath) .filter(path -> !path.isEmpty()) .map(Paths::get) .orElse(null); } @SuppressWarnings("removal") }
haha 💣
public void crudOperationsOnItems() { Pojo pojo = new Pojo(); pojo.setSensitiveString("Sensitive Information need to be encrypted"); cosmosEncryptionAsyncContainer.createItem(pojo) .flatMap(response -> { System.out.println("Created item: " + response.getItem()); return cosmosEncryptionAsyncContainer.readItem(response.getItem().getId(), new PartitionKey(response.getItem().getId()), Pojo.class); }) .flatMap(response -> { System.out.println("Read item: " + response.getItem()); Pojo p = response.getItem(); pojo.setSensitiveString("New Sensitive Information"); return cosmosEncryptionAsyncContainer.replaceItem(p, response.getItem().getId(), new PartitionKey(response.getItem().getId())); }) .flatMap(response -> cosmosEncryptionAsyncContainer.deleteItem(response.getItem().getId(), new PartitionKey(response.getItem().getId()))) .subscribe(); }
public void crudOperationsOnItems() { Pojo pojo = new Pojo(); pojo.setSensitiveString("Sensitive Information need to be encrypted"); cosmosEncryptionAsyncContainer.createItem(pojo) .flatMap(response -> { System.out.println("Created item: " + response.getItem()); return cosmosEncryptionAsyncContainer.readItem(response.getItem().getId(), new PartitionKey(response.getItem().getId()), Pojo.class); }) .flatMap(response -> { System.out.println("Read item: " + response.getItem()); Pojo p = response.getItem(); pojo.setSensitiveString("New Sensitive Information"); return cosmosEncryptionAsyncContainer.replaceItem(p, response.getItem().getId(), new PartitionKey(response.getItem().getId())); }) .flatMap(response -> cosmosEncryptionAsyncContainer.deleteItem(response.getItem().getId(), new PartitionKey(response.getItem().getId()))) .subscribe(); }
class ReadmeSamples { private final TokenCredential tokenCredentials = new EnvironmentCredentialBuilder().build(); private final CosmosAsyncClient cosmosAsyncClient = new CosmosClientBuilder() .endpoint("<YOUR ENDPOINT HERE>") .key("<YOUR KEY HERE>") .buildAsyncClient(); private final CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(cosmosAsyncClient, new AzureKeyVaultKeyStoreProvider(tokenCredentials)); private final EncryptionKeyStoreProvider encryptionKeyStoreProvider = new AzureKeyVaultKeyStoreProvider(tokenCredentials); private final CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase = cosmosEncryptionAsyncClient .getCosmosEncryptionAsyncDatabase("<YOUR DATABASE NAME>"); private final CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer = cosmosEncryptionAsyncDatabase .getCosmosEncryptionAsyncContainer("<YOUR CONTAINER NAME>"); public ReadmeSamples() throws MicrosoftDataEncryptionException { } public void createCosmosEncryptionClient() throws MicrosoftDataEncryptionException { CosmosAsyncClient cosmosAsyncClient = new CosmosClientBuilder() .endpoint("<YOUR ENDPOINT HERE>") .key("<YOUR KEY HERE>") .buildAsyncClient(); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(cosmosAsyncClient, new AzureKeyVaultKeyStoreProvider(tokenCredentials)); } public void createCosmosEncryptionDatabase() { CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosAsyncClient() .createDatabaseIfNotExists("<YOUR DATABASE NAME>") .map(databaseResponse -> cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseResponse.getProperties().getId())) .block(); } public void createCosmosEncryptionContainer() { EncryptionKeyWrapMetadata metadata = new EncryptionKeyWrapMetadata(encryptionKeyStoreProvider.getProviderName(), "key", "tempmetadata"); CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer = cosmosEncryptionAsyncDatabase .createClientEncryptionKey("key", CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata) .then(Mono.defer(() -> { ClientEncryptionIncludedPath includedPath = new ClientEncryptionIncludedPath(); includedPath.setClientEncryptionKeyId("key"); includedPath.setPath("/sensitiveString"); includedPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC); includedPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath); ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(paths); CosmosContainerProperties properties = new CosmosContainerProperties("<YOUR CONTAINER NAME>", "/mypk"); properties.setClientEncryptionPolicy(clientEncryptionPolicy); return cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties); })) .map(containerResponse -> cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerResponse.getProperties().getId())) .block(); } private static final class Pojo { private final String id; private String sensitiveString; Pojo() { this.id = UUID.randomUUID().toString(); } String getId () { return id; } void setSensitiveString(String sensitiveString) { this.sensitiveString = sensitiveString; } } }
class ReadmeSamples { private final TokenCredential tokenCredentials = new EnvironmentCredentialBuilder().build(); private final CosmosAsyncClient cosmosAsyncClient = new CosmosClientBuilder() .endpoint("<YOUR ENDPOINT HERE>") .key("<YOUR KEY HERE>") .buildAsyncClient(); private final CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(cosmosAsyncClient, new AzureKeyVaultKeyStoreProvider(tokenCredentials)); private final EncryptionKeyStoreProvider encryptionKeyStoreProvider = new AzureKeyVaultKeyStoreProvider(tokenCredentials); private final CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase = cosmosEncryptionAsyncClient .getCosmosEncryptionAsyncDatabase("<YOUR DATABASE NAME>"); private final CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer = cosmosEncryptionAsyncDatabase .getCosmosEncryptionAsyncContainer("<YOUR CONTAINER NAME>"); public ReadmeSamples() throws MicrosoftDataEncryptionException { } public void createCosmosEncryptionClient() throws MicrosoftDataEncryptionException { CosmosAsyncClient cosmosAsyncClient = new CosmosClientBuilder() .endpoint("<YOUR ENDPOINT HERE>") .key("<YOUR KEY HERE>") .buildAsyncClient(); CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(cosmosAsyncClient, new AzureKeyVaultKeyStoreProvider(tokenCredentials)); } public void createCosmosEncryptionDatabase() { CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosAsyncClient() .createDatabaseIfNotExists("<YOUR DATABASE NAME>") .map(databaseResponse -> cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseResponse.getProperties().getId())) .block(); } public void createCosmosEncryptionContainer() { EncryptionKeyWrapMetadata metadata = new EncryptionKeyWrapMetadata(encryptionKeyStoreProvider.getProviderName(), "key", "tempmetadata"); CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer = cosmosEncryptionAsyncDatabase .createClientEncryptionKey("key", CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata) .then(Mono.defer(() -> { ClientEncryptionIncludedPath includedPath = new ClientEncryptionIncludedPath(); includedPath.setClientEncryptionKeyId("key"); includedPath.setPath("/sensitiveString"); includedPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC); includedPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(includedPath); ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(paths); CosmosContainerProperties properties = new CosmosContainerProperties("<YOUR CONTAINER NAME>", "/mypk"); properties.setClientEncryptionPolicy(clientEncryptionPolicy); return cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties); })) .map(containerResponse -> cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerResponse.getProperties().getId())) .block(); } private static final class Pojo { private final String id; private String sensitiveString; Pojo() { this.id = UUID.randomUUID().toString(); } String getId () { return id; } void setSensitiveString(String sensitiveString) { this.sensitiveString = sensitiveString; } } }
Since this is a requirement for both the add and remove calls, should we disallow creating the options type with these values set to null?
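A minimal sketch of that suggestion, assuming a hypothetical constructor-validated variant of the options type; the class name, constructor shape, and the AttestationSigningKey package path are illustrative only, not the actual SDK surface:

import java.security.cert.X509Certificate;
import java.util.Objects;
import com.azure.security.attestation.models.AttestationSigningKey; // package path assumed

public final class ValidatedPolicyManagementCertificateOptions {
    private final X509Certificate certificate;
    private final AttestationSigningKey attestationSigner;

    public ValidatedPolicyManagementCertificateOptions(X509Certificate certificate,
                                                       AttestationSigningKey attestationSigner) {
        // Both values are required by the add and remove certificate calls, so fail fast here
        // instead of relying on Objects.requireNonNull checks inside each call site.
        this.certificate = Objects.requireNonNull(certificate, "certificate cannot be null.");
        this.attestationSigner = Objects.requireNonNull(attestationSigner, "attestationSigner cannot be null.");
    }

    public X509Certificate getCertificate() {
        return certificate;
    }

    public AttestationSigningKey getAttestationSigner() {
        return attestationSigner;
    }
}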
Mono<Response<PolicyCertificatesModificationResult>> addPolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options, Context context) { Objects.requireNonNull(options.getCertificate()); Objects.requireNonNull(options.getAttestationSigner()); final AttestationTokenValidationOptions finalOptions = this.tokenValidationOptions; String base64Certificate = null; try { base64Certificate = Base64.getEncoder().encodeToString(options.getCertificate().getEncoded()); } catch (CertificateEncodingException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } JsonWebKey jwk = new JsonWebKey(options.getCertificate().getType()) .setX5C(new ArrayList<String>()); jwk.getX5C().add(base64Certificate); AttestationCertificateManagementBody certificateBody = new AttestationCertificateManagementBody() .setPolicyCertificate(jwk); AttestationToken addToken = null; try { addToken = AttestationTokenImpl.createSecuredToken(SERIALIZER_ADAPTER.serialize(certificateBody, SerializerEncoding.JSON), options.getAttestationSigner()); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } return this.certificatesImpl.addWithResponseAsync(addToken.serialize(), context) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyCertificatesModificationResult addResult = PolicyCertificatesModificationResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyCertificatesModificationResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), addResult); }); }); }
Objects.requireNonNull(options.getAttestationSigner());
new RuntimeException(e.getMessage())); } } private AttestationToken generatePolicySetToken(String policy, AttestationSigningKey signer) { String serializedPolicy = null; if (policy != null) { StoredAttestationPolicy policyToSet = new StoredAttestationPolicy(); policyToSet.setAttestationPolicy(policy.getBytes(StandardCharsets.UTF_8)); try { serializedPolicy = SERIALIZER_ADAPTER.serialize(policyToSet, SerializerEncoding.JSON); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } } AttestationToken setToken; if (signer == null) { if (policy != null) { setToken = AttestationTokenImpl.createUnsecuredToken(serializedPolicy); } else { setToken = AttestationTokenImpl.createUnsecuredToken(); } } else { if (policy != null) { setToken = AttestationTokenImpl.createSecuredToken(serializedPolicy, signer); } else { setToken = AttestationTokenImpl.createSecuredToken(signer); } } return setToken; }
class is the SHA-256 hash * of the underlying policy set JSON Web Token sent to the attestation service. * * This helper API allows the caller to independently calculate SHA-256 hash of an * attestation token corresponding to the value which would be sent to the attestation * service. * * The value returned by this API must always match the value in the {@link PolicyResult}
class is the SHA-256 hash * of the underlying policy set JSON Web Token sent to the attestation service. * * This helper API allows the caller to independently calculate SHA-256 hash of an * attestation token corresponding to the value which would be sent to the attestation * service. * * The value returned by this API must always match the value in the {@link PolicyResult}
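For reference, a short sketch of the hash check this javadoc describes, modeled on the checkPolicyTokenHash sample embedded further down in this class; the client, PolicyResult, policy string, and signing key are assumed inputs here, not values taken from the snippet above:

import java.util.Arrays;
import com.azure.core.util.BinaryData;
import com.azure.security.attestation.AttestationAdministrationAsyncClient;
import com.azure.security.attestation.models.AttestationSigningKey;
import com.azure.security.attestation.models.PolicyResult;

final class PolicyTokenHashCheckSketch {
    // Recompute the hash locally and compare it with the value the service reports.
    static void verify(AttestationAdministrationAsyncClient client, PolicyResult result,
                       String policySent, AttestationSigningKey signer) {
        BinaryData expectedHash = client.calculatePolicyTokenHash(policySent, signer);
        BinaryData actualHash = result.getPolicyTokenHash();
        if (!Arrays.equals(expectedHash.toBytes(), actualHash.toBytes())) {
            // The policy received by the service is not the one the caller intended to set.
            throw new RuntimeException("Policy token hash does not match expected hash.");
        }
    }
}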
From the food industry to hospitality? Why the change of heart ;)
public static void main(String[] args) { SearchIndexClient client = new SearchIndexClientBuilder() .endpoint(ENDPOINT) .credential(new AzureKeyCredential(ADMIN_KEY)) .buildClient(); String indexName = "hotels"; SearchIndex newIndex = new SearchIndex(indexName, Arrays.asList( new SearchField("hotelId", SearchFieldDataType.STRING) .setKey(true) .setFilterable(true) .setSortable(true), new SearchField("hotelName", SearchFieldDataType.STRING) .setSearchable(true) .setFilterable(true) .setSortable(true), new SearchField("description", SearchFieldDataType.STRING) .setSearchable(true) .setAnalyzerName(LexicalAnalyzerName.EN_LUCENE), new SearchField("descriptionFr", SearchFieldDataType.STRING) .setSearchable(true) .setAnalyzerName(LexicalAnalyzerName.FR_LUCENE), new SearchField("tags", SearchFieldDataType.collection(SearchFieldDataType.STRING)) .setSearchable(true) .setFilterable(true) .setFacetable(true), new SearchField("address", SearchFieldDataType.COMPLEX) .setFields( new SearchField("streetAddress", SearchFieldDataType.STRING) .setSearchable(true), new SearchField("city", SearchFieldDataType.STRING) .setFilterable(true) .setSortable(true) .setFacetable(true), new SearchField("stateProvince", SearchFieldDataType.STRING) .setSearchable(true) .setFilterable(true) .setSortable(true) .setFacetable(true), new SearchField("country", SearchFieldDataType.STRING) .setSearchable(true) .setSynonymMapNames("synonymMapName") .setFilterable(true) .setSortable(true) .setFacetable(true), new SearchField("postalCode", SearchFieldDataType.STRING) .setSearchable(true) .setFilterable(true) .setSortable(true) .setFacetable(true)) )); client.createIndex(newIndex); client.deleteIndex(indexName); }
String indexName = "hotels";
public static void main(String[] args) { SearchIndexClient client = new SearchIndexClientBuilder() .endpoint(ENDPOINT) .credential(new AzureKeyCredential(ADMIN_KEY)) .buildClient(); String indexName = "hotels"; SearchIndex newIndex = new SearchIndex(indexName, Arrays.asList( new SearchField("hotelId", SearchFieldDataType.STRING) .setKey(true) .setFilterable(true) .setSortable(true), new SearchField("hotelName", SearchFieldDataType.STRING) .setSearchable(true) .setFilterable(true) .setSortable(true), new SearchField("description", SearchFieldDataType.STRING) .setSearchable(true) .setAnalyzerName(LexicalAnalyzerName.EN_LUCENE), new SearchField("descriptionFr", SearchFieldDataType.STRING) .setSearchable(true) .setAnalyzerName(LexicalAnalyzerName.FR_LUCENE), new SearchField("tags", SearchFieldDataType.collection(SearchFieldDataType.STRING)) .setSearchable(true) .setFilterable(true) .setFacetable(true), new SearchField("address", SearchFieldDataType.COMPLEX) .setFields( new SearchField("streetAddress", SearchFieldDataType.STRING) .setSearchable(true), new SearchField("city", SearchFieldDataType.STRING) .setFilterable(true) .setSortable(true) .setFacetable(true), new SearchField("stateProvince", SearchFieldDataType.STRING) .setSearchable(true) .setFilterable(true) .setSortable(true) .setFacetable(true), new SearchField("country", SearchFieldDataType.STRING) .setSearchable(true) .setSynonymMapNames("synonymMapName") .setFilterable(true) .setSortable(true) .setFacetable(true), new SearchField("postalCode", SearchFieldDataType.STRING) .setSearchable(true) .setFilterable(true) .setSortable(true) .setFacetable(true)) )); client.createIndex(newIndex); client.deleteIndex(indexName); }
class CreateIndexExample { /** * From the Azure portal, get your Azure Cognitive Search service name and API key and populate ADMIN_KEY and * SEARCH_SERVICE_NAME. */ private static final String ENDPOINT = Configuration.getGlobalConfiguration().get("AZURE_COGNITIVE_SEARCH_ENDPOINT"); private static final String ADMIN_KEY = Configuration.getGlobalConfiguration().get("AZURE_COGNITIVE_SEARCH_API_KEY"); }
class CreateIndexExample { /** * From the Azure portal, get your Azure Cognitive Search service name and API key and populate ADMIN_KEY and * SEARCH_SERVICE_NAME. */ private static final String ENDPOINT = Configuration.getGlobalConfiguration().get("AZURE_COGNITIVE_SEARCH_ENDPOINT"); private static final String ADMIN_KEY = Configuration.getGlobalConfiguration().get("AZURE_COGNITIVE_SEARCH_API_KEY"); }
An item operation should be performed to make sure all the caches are warmed up and initialized.
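A hedged sketch of how that warm-up could look for the test below; the item shape is hypothetical and only mirrors the container's "/PE_Name" partition key path:

import com.azure.cosmos.CosmosAsyncContainer;
import com.azure.cosmos.models.CosmosItemRequestOptions;
import com.azure.cosmos.models.FeedRange;
import com.azure.cosmos.models.PartitionKey;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.List;
import java.util.UUID;

final class FeedRangeWarmupSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Write one item so the collection / partition-key-range caches are initialized
    // for the freshly (re)created container before feed ranges are requested.
    static List<FeedRange> warmUpThenGetFeedRanges(CosmosAsyncContainer container) {
        ObjectNode item = MAPPER.createObjectNode();
        item.put("id", UUID.randomUUID().toString());
        item.put("PE_Name", "warmup"); // matches the test's "/PE_Name" partition key path
        container.createItem(item, new PartitionKey("warmup"), new CosmosItemRequestOptions()).block();
        return container.getFeedRanges().block();
    }
}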
public void feedRange_RecreateContainerWithSameName() { String containerName = UUID.randomUUID().toString(); String databaseName = preExistingDatabaseId; try(CosmosAsyncClient clientUnderTest = cosmosClientBuilderUnderTest.buildAsyncClient()) { for (int i = 0; i < 2; i++) { CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(containerName, "/PE_Name"); houseKeepingClient.getDatabase(databaseName).createContainerIfNotExists(cosmosContainerProperties); List<FeedRange> rsp = clientUnderTest.getDatabase(databaseName).getContainer(containerName).getFeedRanges().block(); assertThat(rsp).isNotNull(); assertThat(rsp).hasSize(1); houseKeepingClient.getDatabase(databaseName).getContainer(containerName).delete(); } } }
List<FeedRange> rsp =
public void feedRange_RecreateContainerWithSameName() { String containerName = UUID.randomUUID().toString(); String databaseName = preExistingDatabaseId; try(CosmosAsyncClient clientUnderTest = cosmosClientBuilderUnderTest.buildAsyncClient()) { for (int i = 0; i < 2; i++) { CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(containerName, "/PE_Name"); houseKeepingClient.getDatabase(databaseName).createContainerIfNotExists(cosmosContainerProperties); List<FeedRange> rsp = clientUnderTest.getDatabase(databaseName).getContainer(containerName).getFeedRanges().block(); assertThat(rsp).isNotNull(); assertThat(rsp).hasSize(1); houseKeepingClient.getDatabase(databaseName).getContainer(containerName).delete(); } } }
class FeedRangeTest extends TestSuiteBase { private CosmosClientBuilder cosmosClientBuilderUnderTest; private CosmosClient houseKeepingClient; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public FeedRangeTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { cosmosClientBuilderUnderTest = getClientBuilder(); houseKeepingClient = createGatewayHouseKeepingDocumentClient(false).buildClient(); houseKeepingClient.createDatabase(preExistingDatabaseId); } @AfterClass(groups = {"emulator"}, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteSyncDatabase(houseKeepingClient.getDatabase(preExistingDatabaseId)); safeCloseSyncClient(houseKeepingClient); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) }
class FeedRangeTest extends TestSuiteBase { private CosmosClientBuilder cosmosClientBuilderUnderTest; private CosmosClient houseKeepingClient; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public FeedRangeTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { cosmosClientBuilderUnderTest = getClientBuilder(); houseKeepingClient = createGatewayHouseKeepingDocumentClient(false).buildClient(); houseKeepingClient.createDatabase(preExistingDatabaseId); } @AfterClass(groups = {"emulator"}, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteSyncDatabase(houseKeepingClient.getDatabase(preExistingDatabaseId)); safeCloseSyncClient(houseKeepingClient); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) }
We traditionally put onErrorMap after flatMap in the reactor chain.
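A minimal Reactor sketch of that ordering convention, with stand-in types rather than the actual attestation client code; one practical effect of this placement is that errors raised inside the flatMap stage are mapped as well:

import reactor.core.publisher.Mono;

final class OperatorOrderingSketch {
    // Stand-in for Utilities::mapException; maps any upstream error to another type.
    private static Throwable mapError(Throwable t) {
        return new IllegalStateException("wrapped: " + t.getMessage(), t);
    }

    static Mono<String> decorated(Mono<String> serviceCall) {
        return serviceCall
            .flatMap(body -> Mono.just(body.trim()))       // downstream processing of the response
            .onErrorMap(OperatorOrderingSketch::mapError); // placed after flatMap, per the convention above
    }
}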
Mono<Response<String>> getAttestationPolicyWithResponse(AttestationType attestationType, AttestationTokenValidationOptions validationOptions, Context context) { final AttestationTokenValidationOptions validationOptionsToUse = (validationOptions != null ? validationOptions : this.tokenValidationOptions); return this.policyImpl.getWithResponseAsync(attestationType, context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, validationOptionsToUse); String policyJwt = token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyResult.class).getPolicy(); AttestationTokenImpl policyToken = new AttestationTokenImpl(policyJwt); StoredAttestationPolicy storedPolicy = policyToken.getBody(StoredAttestationPolicy.class); String policy = null; if (storedPolicy != null) { policy = new String(storedPolicy.getAttestationPolicy(), StandardCharsets.UTF_8); } else { policy = null; } return Utilities.generateAttestationResponseFromModelType(token, policyToken, policy); }); }); }
.onErrorMap(Utilities::mapException)
new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, validationOptionsToUse); String policyJwt = token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyResult.class).getPolicy(); AttestationTokenImpl policyToken = new AttestationTokenImpl(policyJwt); StoredAttestationPolicy storedPolicy = policyToken.getBody(StoredAttestationPolicy.class); String policy = null; if (storedPolicy != null) { policy = new String(storedPolicy.getAttestationPolicy(), StandardCharsets.UTF_8); } else { policy = null; } return Utilities.generateAttestationResponseFromModelType(token, policyToken, policy); }
class AttestationAdministrationAsyncClient { private final SigningCertificatesImpl signingCertificatesImpl; private final PoliciesImpl policyImpl; private final PolicyCertificatesImpl certificatesImpl; private final ClientLogger logger; private final AttestationTokenValidationOptions tokenValidationOptions; private final AtomicReference<List<AttestationSigner>> cachedSigners; private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); /** * Initializes an instance of Attestations client. * * @param clientImpl the service client implementation. */ AttestationAdministrationAsyncClient(AttestationClientImpl clientImpl, AttestationTokenValidationOptions tokenValidationOptions) { this.signingCertificatesImpl = clientImpl.getSigningCertificates(); this.policyImpl = clientImpl.getPolicies(); this.certificatesImpl = clientImpl.getPolicyCertificates(); this.tokenValidationOptions = tokenValidationOptions; this.logger = new ClientLogger(AttestationAdministrationAsyncClient.class); this.cachedSigners = new AtomicReference<>(null); } /** * Retrieves the current policy for an attestation type. * <p> * <b>NOTE:</b> * The {@link AttestationAdministrationAsyncClient * attestation policy specified by the user. This is NOT the full attestation policy maintained by * the attestation service. Specifically it does not include the signing certificates used to verify the attestation * policy. * </p> * <p> * To retrieve the signing certificates used to sign the policy, {@link Response} object returned from this API * is an instance of an {@link com.azure.security.attestation.models.AttestationResponse} object * and the caller can retrieve the full policy object maintained by the service by calling the * {@link AttestationResponse * The returned {@link com.azure.security.attestation.models.AttestationToken} object will be * the value stored by the attestation service. * </p> * * * @param attestationType Specifies the trusted execution environment whose policy should be retrieved. * @param validationOptions Options used to validate the response returned by the attestation service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the attestation policy expressed as a string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<String>> getAttestationPolicyWithResponse(AttestationType attestationType, AttestationTokenValidationOptions validationOptions) { return withContext(context -> getAttestationPolicyWithResponse(attestationType, validationOptions, context)); } /** * Retrieves the current policy for an attestation type. * <p> * <b>NOTE:</b> * The {@link AttestationAdministrationAsyncClient * attestation policy specified by the user. This is NOT the full attestation policy maintained by * the attestation service. Specifically it does not include the signing certificates used to verify the attestation * policy. 
* </p> * <p> * To retrieve the signing certificates used to sign the policy, use the {@link AttestationAdministrationAsyncClient * The {@link Response} object is an instance of an {@link com.azure.security.attestation.models.AttestationResponse} object * and the caller can retrieve the full information maintained by the service by calling the {@link AttestationResponse * The returned {@link com.azure.security.attestation.models.AttestationToken} object will be * the value stored by the attestation service. * </p> * * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<String> getAttestationPolicy(AttestationType attestationType) { return getAttestationPolicyWithResponse(attestationType, null) .flatMap(FluxUtil::toMono); } /** * Retrieves the current policy for an attestation type. * * @param attestationType Specifies the trusted execution environment whose policy should be retrieved. * @param context Context for the remote call. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the attestation policy expressed as a string. */ Mono<Response<String>> getAttestationPolicyWithResponse(AttestationType attestationType, AttestationTokenValidationOptions validationOptions, Context context) { final AttestationTokenValidationOptions validationOptionsToUse = (validationOptions != null ? validationOptions : this.tokenValidationOptions); return this.policyImpl.getWithResponseAsync(attestationType, context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, ); }); } /** * Sets the current policy for an attestation type with an unsecured attestation policy. * * <p>Note that this API will only work on AAD mode attestation instances, because it sets the policy * using an unsecured attestation token.</p> * * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicy --> * <pre> * String policyToSet = &quot;version=1.0; authorizationrules& * Mono&lt;PolicyResult&gt; resultMono = client.setAttestationPolicy& * PolicyResult result = resultMono.block& * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicy --> * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param newAttestationPolicy Specifies the policy to be set on the instance. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyResult> setAttestationPolicy(AttestationType attestationType, String newAttestationPolicy) { AttestationPolicySetOptions options = new AttestationPolicySetOptions() .setAttestationPolicy(newAttestationPolicy); return setAttestationPolicyWithResponse(attestationType, options) .flatMap(FluxUtil::toMono); } /** * Sets the current policy for an attestation type. * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for the setPolicy operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyResult>> setAttestationPolicyWithResponse(AttestationType attestationType, AttestationPolicySetOptions options) { return withContext(context -> setAttestationPolicyWithResponse(attestationType, options, context)); } /** * Sets the current policy for an attestation type. * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for the setPolicy operation, including the new policy to be set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyResult> setAttestationPolicy(AttestationType attestationType, AttestationPolicySetOptions options) { return setAttestationPolicyWithResponse(attestationType, options) .flatMap(FluxUtil::toMono); } /** * Sets the current policy for an attestation type. * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for setPolicy API, including policy to set and signing key. * @param context Context for the operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ Mono<Response<PolicyResult>> setAttestationPolicyWithResponse(AttestationType attestationType, AttestationPolicySetOptions options, Context context) { AttestationTokenValidationOptions validationOptions = options.getValidationOptions(); if (validationOptions == null) { validationOptions = this.tokenValidationOptions; } final AttestationTokenValidationOptions finalOptions = validationOptions; AttestationToken setToken = generatePolicySetToken(options.getAttestationPolicy(), options.getAttestationSigner()); return this.policyImpl.setWithResponseAsync(attestationType, setToken.serialize(), context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyResult policyResult = PolicyResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), policyResult); }); }); } /** * Calculates the PolicyTokenHash for a given policy string. * * The policyTokenHash claim in the {@link PolicyResult} class is the SHA-256 hash * of the underlying policy set JSON Web Token sent to the attestation service. * * This helper API allows the caller to independently calculate SHA-256 hash of an * attestation token corresponding to the value which would be sent to the attestation * service. * * The value returned by this API must always match the value in the {@link PolicyResult} object, * if it does not, it means that the attestation policy received by the service is NOT the one * which the customer specified. * * For an example of how to check the policy token hash: * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.checkPolicyTokenHash --> * <pre> * BinaryData expectedHash = client.calculatePolicyTokenHash& * BinaryData actualHash = result.getPolicyTokenHash& * String expectedString = Hex.toHexString& * String actualString = Hex.toHexString& * if & * throw new RuntimeException& * & * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.checkPolicyTokenHash --> * * @param policy AttestationPolicy document use in the underlying JWT. * @param signer Optional signing key used to sign the underlying JWT. * @return A {@link BinaryData} containing the SHA-256 hash of the attestation policy token corresponding * to the policy and signer. 
*/ public BinaryData calculatePolicyTokenHash(String policy, AttestationSigningKey signer) { AttestationToken policyToken = generatePolicySetToken(policy, signer); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.reset(); md.update(policyToken.serialize().getBytes(StandardCharsets.UTF_8)); return BinaryData.fromBytes(md.digest()); } catch (NoSuchAlgorithmException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } } private AttestationToken generatePolicySetToken(String policy, AttestationSigningKey signer) { String serializedPolicy = null; if (policy != null) { StoredAttestationPolicy policyToSet = new StoredAttestationPolicy(); policyToSet.setAttestationPolicy(policy.getBytes(StandardCharsets.UTF_8)); try { serializedPolicy = SERIALIZER_ADAPTER.serialize(policyToSet, SerializerEncoding.JSON); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } } AttestationToken setToken; if (signer == null) { if (policy != null) { setToken = AttestationTokenImpl.createUnsecuredToken(serializedPolicy); } else { setToken = AttestationTokenImpl.createUnsecuredToken(); } } else { if (policy != null) { setToken = AttestationTokenImpl.createSecuredToken(serializedPolicy, signer); } else { setToken = AttestationTokenImpl.createSecuredToken(signer); } } return setToken; } /** * Resets the current policy for an attestation type to the default policy. * * Each AttestationType has a "default" attestation policy, the resetAttestationPolicy API resets the value * of the attestation policy to the "default" policy. * * This API allows an attestation instance owner to undo the result of a * {@link AttestationAdministrationAsyncClient * * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyResult> resetAttestationPolicy(AttestationType attestationType) { return resetAttestationPolicyWithResponse(attestationType, new AttestationPolicySetOptions()) .flatMap(FluxUtil::toMono); } /** * Resets the current policy for an attestation type to the default policy. * * Each AttestationType has a "default" attestation policy, the resetAttestationPolicy API resets the value * of the attestation policy to the "default" policy. * * This API allows an attestation instance owner to undo the result of a * {@link AttestationAdministrationAsyncClient * * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for the setPolicy operation, including the new policy to be set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyResult> resetAttestationPolicy(AttestationType attestationType, AttestationPolicySetOptions options) { return resetAttestationPolicyWithResponse(attestationType, options) .flatMap(FluxUtil::toMono); } /** * Resets the current policy for an attestation type to the default policy. * * Each AttestationType has a "default" attestation policy, the resetAttestationPolicy API resets the value * of the attestation policy to the "default" policy. * * This API allows an attestation instance owner to undo the result of a * {@link AttestationAdministrationAsyncClient * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options containing the signing key for the reset operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyResult>> resetAttestationPolicyWithResponse(AttestationType attestationType, AttestationPolicySetOptions options) { return withContext(context -> resetAttestationPolicyWithResponse(attestationType, options, context)); } /** * Resets the current policy for an attestation type to the default policy. * * Each AttestationType has a "default" attestation policy, the resetAttestationPolicy API resets the value * of the attestation policy to the "default" policy. * * This API allows an attestation instance owner to undo the result of a * {@link AttestationAdministrationAsyncClient * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for setPolicy API, including policy to set and signing key. * @param context Context for the operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ Mono<Response<PolicyResult>> resetAttestationPolicyWithResponse(AttestationType attestationType, AttestationPolicySetOptions options, Context context) { if (options.getAttestationPolicy() != null) { logger.logThrowableAsError(new InvalidParameterException("Attestation policy should not be set in resetAttestationPolicy")); } AttestationTokenValidationOptions validationOptions = options.getValidationOptions(); if (validationOptions == null) { validationOptions = this.tokenValidationOptions; } final AttestationTokenValidationOptions finalOptions = validationOptions; AttestationToken setToken; if (options.getAttestationSigner() == null) { setToken = AttestationTokenImpl.createUnsecuredToken(); } else { setToken = AttestationTokenImpl.createSecuredToken(options.getAttestationSigner()); } return this.policyImpl.resetWithResponseAsync(attestationType, setToken.serialize(), context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyResult policyResult = PolicyResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), policyResult); }); }); } /** * Retrieves the current set of attestation policy signing certificates for this instance. * * <p> * On an Isolated attestation instance, each {@link AttestationAdministrationAsyncClient * or {@link AttestationAdministrationAsyncClient * must be signed with the private key corresponding to one of the certificates in the list returned * by this API. *</p> * <p> * This establishes that the sender is in possession of the private key associated with the * configured attestation policy management certificates, and thus the sender is authorized * to perform the API operation. * </p> * * @param options Options used to validate the response from the attestation service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the attestation policy expressed as a string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<List<AttestationSigner>>> listPolicyManagementCertificatesWithResponse(AttestationTokenValidationOptions options) { return withContext(context -> listPolicyManagementCertificatesWithResponse(options, context)); } /** * Retrieves the current set of attestation policy signing certificates for this instance. * * <p> * On an Isolated attestation instance, each {@link AttestationAdministrationAsyncClient * or {@link AttestationAdministrationAsyncClient * must be signed with the private key corresponding to one of the certificates in the list returned * by this API. *</p> * <p> * This establishes that the sender is in possession of the private key associated with the * configured attestation policy management certificates, and thus the sender is authorized * to perform the API operation. * </p> * * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. 
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<List<AttestationSigner>> listPolicyManagementCertificates() { return listPolicyManagementCertificatesWithResponse(null) .flatMap(FluxUtil::toMono); } /** * Retrieves the current set of attestation policy signing certificates for this instance. * * <p> * On an Isolated attestation instance, each {@link AttestationAdministrationAsyncClient * or {@link AttestationAdministrationAsyncClient * must be signed with the private key corresponding to one of the certificates in the list returned * by this API. *</p> * <p> * This establishes that the sender is in possession of the private key associated with the * configured attestation policy management certificates, and thus the sender is authorized * to perform the API operation. * </p> * @param context Context for the remote call. * @param validationOptions Options used to validate the response from the attestation service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the attestation policy expressed as a string. */ Mono<Response<List<AttestationSigner>>> listPolicyManagementCertificatesWithResponse(AttestationTokenValidationOptions validationOptions, Context context) { final AttestationTokenValidationOptions optionsToUse = (validationOptions != null ? validationOptions : this.tokenValidationOptions); return this.certificatesImpl.getWithResponseAsync(context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> responseWithToken = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { responseWithToken.getValue().validate(signers, optionsToUse); JsonWebKeySet policyJwks = responseWithToken.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyCertificatesResult.class).getPolicyCertificates(); List<AttestationSigner> policySigners = AttestationSignerImpl.attestationSignersFromJwks(policyJwks); return Utilities.generateAttestationResponseFromModelType(responseWithToken, responseWithToken.getValue(), policySigners); }); }); } /** * Sets the current policy for an attestation type. * * @param options Options for this API call, encapsulating both the X.509 certificate to add to the set of policy * signing certificates and the signing key used to sign the request to the service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyCertificatesModificationResult>> addPolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options) { return withContext(context -> addPolicyManagementCertificateWithResponse(options, context)); } /** * Adds a new attestation policy certificate to the set of policy management certificates. 
* * @param options Options for this API call, encapsulating both the X.509 certificate to add to the set of policy * signing certificates and the signing key used to sign the request to the service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyCertificatesModificationResult> addPolicyManagementCertificate(PolicyManagementCertificateOptions options) { return addPolicyManagementCertificateWithResponse(options) .flatMap(FluxUtil::toMono); } /** * Adds a new policy management certificate to the set of policy management certificates. * * @param options Options for this API call, encapsulating both the X.509 certificate to add to the set of policy * signing certificates and the signing key used to sign the request to the service. * @param context Context for the operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ Mono<Response<PolicyCertificatesModificationResult>> addPolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options, Context context) { Objects.requireNonNull(options.getCertificate()); Objects.requireNonNull(options.getAttestationSigner()); final AttestationTokenValidationOptions finalOptions = this.tokenValidationOptions; String base64Certificate = null; try { base64Certificate = Base64.getEncoder().encodeToString(options.getCertificate().getEncoded()); } catch (CertificateEncodingException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } JsonWebKey jwk = new JsonWebKey(options.getCertificate().getType()) .setX5C(new ArrayList<String>()); jwk.getX5C().add(base64Certificate); AttestationCertificateManagementBody certificateBody = new AttestationCertificateManagementBody() .setPolicyCertificate(jwk); AttestationToken addToken = null; try { addToken = AttestationTokenImpl.createSecuredToken(SERIALIZER_ADAPTER.serialize(certificateBody, SerializerEncoding.JSON), options.getAttestationSigner()); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } return this.certificatesImpl.addWithResponseAsync(addToken.serialize(), context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyCertificatesModificationResult addResult = PolicyCertificatesModificationResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyCertificatesModificationResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), addResult); }); }); } /** * Removes a policy management certificate from the set of policy management certificates. 
* * @param options Options for this API call, encapsulating both the X.509 certificate to remove from the set of policy * signing certificates and the signing key used to sign the request to the service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyCertificatesModificationResult>> removePolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options) { return withContext(context -> removePolicyManagementCertificateWithResponse(options, context)); } /** * Removes a policy management certificate from the set of policy management certificates. * * @param options Options for this API call, encapsulating both the X.509 certificate to remove from the set of policy * signing certificates and the signing key used to sign the request to the service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyCertificatesModificationResult> removePolicyManagementCertificate(PolicyManagementCertificateOptions options) { return removePolicyManagementCertificateWithResponse(options) .flatMap(FluxUtil::toMono); } /** * Removes a policy management certificate from the set of policy management certificates. * * @param options Options for this API call, encapsulating both the X.509 certificate to remove from the set of policy * signing certificates and the signing key used to sign the request to the service. * @param context Context for the operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ Mono<Response<PolicyCertificatesModificationResult>> removePolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options, Context context) { Objects.requireNonNull(options.getCertificate()); Objects.requireNonNull(options.getAttestationSigner()); final AttestationTokenValidationOptions finalOptions = this.tokenValidationOptions; String base64Certificate = null; try { base64Certificate = Base64.getEncoder().encodeToString(options.getCertificate().getEncoded()); } catch (CertificateEncodingException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } JsonWebKey jwk = new JsonWebKey(options.getCertificate().getType()) .setX5C(new ArrayList<String>()); jwk.getX5C().add(base64Certificate); AttestationCertificateManagementBody certificateBody = new AttestationCertificateManagementBody() .setPolicyCertificate(jwk); AttestationToken addToken = null; try { addToken = AttestationTokenImpl.createSecuredToken(SERIALIZER_ADAPTER.serialize(certificateBody, SerializerEncoding.JSON), options.getAttestationSigner()); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } return this.certificatesImpl.removeWithResponseAsync(addToken.serialize(), context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyCertificatesModificationResult addResult = PolicyCertificatesModificationResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyCertificatesModificationResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), addResult); }); }); } /** * Return cached attestation signers, fetching from the internet if needed. *<p> * Validating an attestation JWT requires a set of attestation signers retrieved from the * attestation service using the `signingCertificatesImpl.getAsync()` API. This API can take * more than 100ms to complete, so caching the value locally can significantly reduce the time * needed to validate the attestation JWT. * </p><p> * Note that there is a possible race condition if two threads on the same client are making * calls to the attestation service. In that case, two calls to `signingCertificatesImpl.getAsync()` * may be made. That should not result in any problems - one of the two calls will complete first * and the `compareAndSet` will update the `cachedSigners`. The second call's result will be discarded * because the `compareAndSet` API won't capture a reference to the second `signers` object. * * </p> * @return cached signers. */ Mono<List<AttestationSigner>> getCachedAttestationSigners() { if (this.cachedSigners.get() != null) { return Mono.just(this.cachedSigners.get()); } else { return this.signingCertificatesImpl.getAsync() .map(AttestationSignerImpl::attestationSignersFromJwks) .map(signers -> { this.cachedSigners.compareAndSet(null, signers); return this.cachedSigners.get(); }); } } }
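The calculatePolicyTokenHash helper above exists so callers can verify that the policy the service received is byte-for-byte the one they intended to set. Below is a minimal sketch of that check, not the SDK's embedded sample: the wrapper class, method name, and import paths are assumptions for illustration, while calculatePolicyTokenHash and PolicyResult.getPolicyTokenHash come from the surrounding code.

import java.util.Arrays;

import com.azure.core.util.BinaryData;
import com.azure.security.attestation.AttestationAdministrationAsyncClient;
import com.azure.security.attestation.models.AttestationSigningKey;
import com.azure.security.attestation.models.PolicyResult;

// Hedged sketch: recompute the policy token hash locally and compare it with the
// hash the service reported in PolicyResult. A mismatch means the service did not
// receive the policy document the caller intended to set.
final class PolicyHashCheck {
    static void verifyPolicyTokenHash(AttestationAdministrationAsyncClient client,
                                      String policy,
                                      AttestationSigningKey signer,
                                      PolicyResult result) {
        BinaryData expectedHash = client.calculatePolicyTokenHash(policy, signer);
        BinaryData actualHash = result.getPolicyTokenHash();
        if (!Arrays.equals(expectedHash.toBytes(), actualHash.toBytes())) {
            throw new IllegalStateException(
                "Policy token hash returned by the service does not match the locally computed hash.");
        }
    }
}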
class AttestationAdministrationAsyncClient { private final SigningCertificatesImpl signingCertificatesImpl; private final PoliciesImpl policyImpl; private final PolicyCertificatesImpl certificatesImpl; private final ClientLogger logger; private final AttestationTokenValidationOptions tokenValidationOptions; private final AtomicReference<List<AttestationSigner>> cachedSigners; private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); /** * Initializes an instance of Attestations client. * * @param clientImpl the service client implementation. */ AttestationAdministrationAsyncClient(AttestationClientImpl clientImpl, AttestationTokenValidationOptions tokenValidationOptions) { this.signingCertificatesImpl = clientImpl.getSigningCertificates(); this.policyImpl = clientImpl.getPolicies(); this.certificatesImpl = clientImpl.getPolicyCertificates(); this.tokenValidationOptions = tokenValidationOptions; this.logger = new ClientLogger(AttestationAdministrationAsyncClient.class); this.cachedSigners = new AtomicReference<>(null); } /** * Retrieves the current policy for an attestation type. * <p> * <b>NOTE:</b> * The {@link AttestationAdministrationAsyncClient * attestation policy specified by the user. This is NOT the full attestation policy maintained by * the attestation service. Specifically it does not include the signing certificates used to verify the attestation * policy. * </p> * <p> * To retrieve the signing certificates used to sign the policy, {@link Response} object returned from this API * is an instance of an {@link com.azure.security.attestation.models.AttestationResponse} object * and the caller can retrieve the full policy object maintained by the service by calling the * {@link AttestationResponse * The returned {@link com.azure.security.attestation.models.AttestationToken} object will be * the value stored by the attestation service. * </p> * * <p><strong>Retrieve the current attestation policy for SGX enclaves.</strong></p> * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.getPolicyWithResponse --> * <pre> * Mono&lt;Response&lt;String&gt;&gt; responseMono = client.getAttestationPolicyWithResponse& * Response&lt;String&gt; response = responseMono.block& * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.getPolicyWithResponse --> * * @param attestationType Specifies the trusted execution environment whose policy should be retrieved. * @param validationOptions Options used to validate the response returned by the attestation service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the attestation policy expressed as a string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<String>> getAttestationPolicyWithResponse(AttestationType attestationType, AttestationTokenValidationOptions validationOptions) { return withContext(context -> getAttestationPolicyWithResponse(attestationType, validationOptions, context)); } /** * Retrieves the current policy for an attestation type. * <p> * <b>NOTE:</b> * The {@code getAttestationPolicy} API returns the underlying * attestation policy specified by the user. This is NOT the full attestation policy maintained by * the attestation service. 
Specifically it does not include the signing certificates used to verify the attestation * policy. * </p> * <p> * To retrieve the signing certificates used to sign the policy, use the {@link AttestationAdministrationAsyncClient * The {@link Response} object is an instance of an {@link com.azure.security.attestation.models.AttestationResponse} object * and the caller can retrieve the full information maintained by the service by calling the {@link AttestationResponse * The returned {@link com.azure.security.attestation.models.AttestationToken} object will be * the value stored by the attestation service. * </p> * * <P><strong>Retrieve the current attestation policy for SGX enclaves.</strong></P> * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.getPolicy --> * <pre> * Mono&lt;String&gt; policyMono = client.getAttestationPolicy& * String policy = policyMono.block& * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.getPolicy --> * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<String> getAttestationPolicy(AttestationType attestationType) { return getAttestationPolicyWithResponse(attestationType, null) .flatMap(FluxUtil::toMono); } /** * Retrieves the current policy for an attestation type. * * @param attestationType Specifies the trusted execution environment whose policy should be retrieved. * @param context Context for the remote call. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the attestation policy expressed as a string. */ Mono<Response<String>> getAttestationPolicyWithResponse(AttestationType attestationType, AttestationTokenValidationOptions validationOptions, Context context) { final AttestationTokenValidationOptions validationOptionsToUse = (validationOptions != null ? validationOptions : this.tokenValidationOptions); return this.policyImpl.getWithResponseAsync(attestationType, context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, ); }); } /** * Sets the current policy for an attestation type with an unsecured attestation policy. * * <p>Note that this API will only work on AAD mode attestation instances, because it sets the policy * using an unsecured attestation token.</p> * * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicySimple --> * <pre> * String policyToSet = &quot;version=1.0; authorizationrules& * Mono&lt;PolicyResult&gt; resultMono = client.setAttestationPolicy& * PolicyResult result = resultMono.block& * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicySimple --> * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param newAttestationPolicy Specifies the policy to be set on the instance. 
* @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyResult> setAttestationPolicy(AttestationType attestationType, String newAttestationPolicy) { AttestationPolicySetOptions options = new AttestationPolicySetOptions() .setAttestationPolicy(newAttestationPolicy); return setAttestationPolicyWithResponse(attestationType, options) .flatMap(FluxUtil::toMono); } /** * Sets the current policy for an attestation type with an unsecured attestation policy. * * <p>Note that this API will only work on AAD mode attestation instances, because it sets the policy * using an unsecured attestation token.</p> * * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicyWithResponseSimple --> * <pre> * Mono&lt;Response&lt;PolicyResult&gt;&gt; resultWithResponseMono = client.setAttestationPolicyWithResponse& * AttestationType.OPEN_ENCLAVE, &quot;version=1.0; authorizationrules& * Response&lt;PolicyResult&gt; response = resultWithResponseMono.block& * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicyWithResponseSimple --> * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param newAttestationPolicy Specifies the policy to be set on the instance. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyResult>> setAttestationPolicyWithResponse(AttestationType attestationType, String newAttestationPolicy) { AttestationPolicySetOptions options = new AttestationPolicySetOptions() .setAttestationPolicy(newAttestationPolicy); return withContext(context -> setAttestationPolicyWithResponse(attestationType, options, context)); } /** * Sets the current policy for an attestation type. * * Setting the attestation requires that the caller provide an {@link AttestationPolicySetOptions} object * which provides the options for setting the policy. There are two major components to a setPolicy * request: * <ul> * <li>The policy to set</li> * <li>A signing key used to sign the policy sent to the service (OPTIONAL)</li> * </ul> * * On Isolated mode attestation instances, the signing key MUST include one of the configured policy signing * certificates. * * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicyWithResponse --> * <pre> * Mono&lt;Response&lt;PolicyResult&gt;&gt; resultWithResponseMono = client.setAttestationPolicyWithResponse& * new AttestationPolicySetOptions& * .setAttestationPolicy& * .setAttestationSigner& * Response&lt;PolicyResult&gt; response = resultWithResponseMono.block& * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicyWithResponse --> * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for the setPolicy operation. 
* @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyResult>> setAttestationPolicyWithResponse(AttestationType attestationType, AttestationPolicySetOptions options) { return withContext(context -> setAttestationPolicyWithResponse(attestationType, options, context)); } /** * Sets the current policy for an attestation type. * * Setting the attestation requires that the caller provide an {@link AttestationPolicySetOptions} object * which provides the options for setting the policy. There are two major components to a setPolicy * request: * <ul> * <li>The policy to set</li> * <li>A signing key used to sign the policy sent to the service (OPTIONAL)</li> * </ul> * * On Isolated mode attestation instances, the signing key MUST include one of the configured policy signing * certificates. * * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicy --> * <pre> * String policyToSet = &quot;version=1.0; authorizationrules& * Mono&lt;PolicyResult&gt; resultMono = client.setAttestationPolicy& * new AttestationPolicySetOptions& * .setAttestationPolicy& * .setAttestationSigner& * PolicyResult result = resultMono.block& * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.setPolicy --> * * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for the setPolicy operation, including the new policy to be set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyResult> setAttestationPolicy(AttestationType attestationType, AttestationPolicySetOptions options) { return setAttestationPolicyWithResponse(attestationType, options) .flatMap(FluxUtil::toMono); } /** * Sets the current policy for an attestation type. * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for setPolicy API, including policy to set and signing key. * @param context Context for the operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ Mono<Response<PolicyResult>> setAttestationPolicyWithResponse(AttestationType attestationType, AttestationPolicySetOptions options, Context context) { AttestationTokenValidationOptions validationOptions = options.getValidationOptions(); if (validationOptions == null) { validationOptions = this.tokenValidationOptions; } final AttestationTokenValidationOptions finalOptions = validationOptions; AttestationToken setToken = generatePolicySetToken(options.getAttestationPolicy(), options.getAttestationSigner()); return this.policyImpl.setWithResponseAsync(attestationType, setToken.serialize(), context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyResult policyResult = PolicyResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), policyResult); }); }); } /** * Calculates the PolicyTokenHash for a given policy string. * * The policyTokenHash claim in the {@link PolicyResult} class is the SHA-256 hash * of the underlying policy set JSON Web Token sent to the attestation service. * * This helper API allows the caller to independently calculate SHA-256 hash of an * attestation token corresponding to the value which would be sent to the attestation * service. * * The value returned by this API must always match the value in the {@link PolicyResult} object, * if it does not, it means that the attestation policy received by the service is NOT the one * which the customer specified. * * For an example of how to check the policy token hash: * <!-- src_embed com.azure.security.attestation.AttestationAdministrationAsyncClient.checkPolicyTokenHash --> * <pre> * BinaryData expectedHash = client.calculatePolicyTokenHash& * BinaryData actualHash = result.getPolicyTokenHash& * String expectedString = Hex.toHexString& * String actualString = Hex.toHexString& * if & * throw new RuntimeException& * & * </pre> * <!-- end com.azure.security.attestation.AttestationAdministrationAsyncClient.checkPolicyTokenHash --> * * @param policy AttestationPolicy document use in the underlying JWT. * @param signer Optional signing key used to sign the underlying JWT. * @return A {@link BinaryData} containing the SHA-256 hash of the attestation policy token corresponding * to the policy and signer. 
*/ public BinaryData calculatePolicyTokenHash(String policy, AttestationSigningKey signer) { AttestationToken policyToken = generatePolicySetToken(policy, signer); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.reset(); md.update(policyToken.serialize().getBytes(StandardCharsets.UTF_8)); return BinaryData.fromBytes(md.digest()); } catch (NoSuchAlgorithmException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } } private AttestationToken generatePolicySetToken(String policy, AttestationSigningKey signer) { String serializedPolicy = null; if (policy != null) { StoredAttestationPolicy policyToSet = new StoredAttestationPolicy(); policyToSet.setAttestationPolicy(policy.getBytes(StandardCharsets.UTF_8)); try { serializedPolicy = SERIALIZER_ADAPTER.serialize(policyToSet, SerializerEncoding.JSON); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } } AttestationToken setToken; if (signer == null) { if (policy != null) { setToken = AttestationTokenImpl.createUnsecuredToken(serializedPolicy); } else { setToken = AttestationTokenImpl.createUnsecuredToken(); } } else { if (policy != null) { setToken = AttestationTokenImpl.createSecuredToken(serializedPolicy, signer); } else { setToken = AttestationTokenImpl.createSecuredToken(signer); } } return setToken; } /** * Resets the current policy for an attestation type to the default policy. * * Each AttestationType has a "default" attestation policy, the resetAttestationPolicy API resets the value * of the attestation policy to the "default" policy. * * This API allows an attestation instance owner to undo the result of a * {@link AttestationAdministrationAsyncClient * * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyResult> resetAttestationPolicy(AttestationType attestationType) { return resetAttestationPolicyWithResponse(attestationType, new AttestationPolicySetOptions()) .flatMap(FluxUtil::toMono); } /** * Resets the current policy for an attestation type to the default policy. * * Each AttestationType has a "default" attestation policy, the resetAttestationPolicy API resets the value * of the attestation policy to the "default" policy. * * This API allows an attestation instance owner to undo the result of a * {@link AttestationAdministrationAsyncClient * * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for the setPolicy operation, including the new policy to be set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyResult> resetAttestationPolicy(AttestationType attestationType, AttestationPolicySetOptions options) { return resetAttestationPolicyWithResponse(attestationType, options) .flatMap(FluxUtil::toMono); } /** * Resets the current policy for an attestation type to the default policy. * * Each AttestationType has a "default" attestation policy, the resetAttestationPolicy API resets the value * of the attestation policy to the "default" policy. * * This API allows an attestation instance owner to undo the result of a * {@link AttestationAdministrationAsyncClient * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options containing the signing key for the reset operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyResult>> resetAttestationPolicyWithResponse(AttestationType attestationType, AttestationPolicySetOptions options) { return withContext(context -> resetAttestationPolicyWithResponse(attestationType, options, context)); } /** * Resets the current policy for an attestation type to the default policy. * * Each AttestationType has a "default" attestation policy, the resetAttestationPolicy API resets the value * of the attestation policy to the "default" policy. * * This API allows an attestation instance owner to undo the result of a * {@link AttestationAdministrationAsyncClient * * @param attestationType Specifies the trusted execution environment to be used to validate the evidence. * @param options Options for setPolicy API, including policy to set and signing key. * @param context Context for the operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ Mono<Response<PolicyResult>> resetAttestationPolicyWithResponse(AttestationType attestationType, AttestationPolicySetOptions options, Context context) { if (options.getAttestationPolicy() != null) { logger.logThrowableAsError(new InvalidParameterException("Attestation policy should not be set in resetAttestationPolicy")); } AttestationTokenValidationOptions validationOptions = options.getValidationOptions(); if (validationOptions == null) { validationOptions = this.tokenValidationOptions; } final AttestationTokenValidationOptions finalOptions = validationOptions; AttestationToken setToken; if (options.getAttestationSigner() == null) { setToken = AttestationTokenImpl.createUnsecuredToken(); } else { setToken = AttestationTokenImpl.createSecuredToken(options.getAttestationSigner()); } return this.policyImpl.resetWithResponseAsync(attestationType, setToken.serialize(), context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyResult policyResult = PolicyResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), policyResult); }); }); } /** * Retrieves the current set of attestation policy signing certificates for this instance. * * <p> * On an Isolated attestation instance, each {@link AttestationAdministrationAsyncClient * or {@link AttestationAdministrationAsyncClient * must be signed with the private key corresponding to one of the certificates in the list returned * by this API. *</p> * <p> * This establishes that the sender is in possession of the private key associated with the * configured attestation policy management certificates, and thus the sender is authorized * to perform the API operation. * </p> * * @param options Options used to validate the response from the attestation service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the attestation policy expressed as a string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<List<AttestationSigner>>> listPolicyManagementCertificatesWithResponse(AttestationTokenValidationOptions options) { return withContext(context -> listPolicyManagementCertificatesWithResponse(options, context)); } /** * Retrieves the current set of attestation policy signing certificates for this instance. * * <p> * On an Isolated attestation instance, each {@link AttestationAdministrationAsyncClient * or {@link AttestationAdministrationAsyncClient * must be signed with the private key corresponding to one of the certificates in the list returned * by this API. *</p> * <p> * This establishes that the sender is in possession of the private key associated with the * configured attestation policy management certificates, and thus the sender is authorized * to perform the API operation. * </p> * * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. 
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<List<AttestationSigner>> listPolicyManagementCertificates() { return listPolicyManagementCertificatesWithResponse(null) .flatMap(FluxUtil::toMono); } /** * Retrieves the current set of attestation policy signing certificates for this instance. * * <p> * On an Isolated attestation instance, each {@link AttestationAdministrationAsyncClient * or {@link AttestationAdministrationAsyncClient * must be signed with the private key corresponding to one of the certificates in the list returned * by this API. *</p> * <p> * This establishes that the sender is in possession of the private key associated with the * configured attestation policy management certificates, and thus the sender is authorized * to perform the API operation. * </p> * @param context Context for the remote call. * @param validationOptions Options used to validate the response from the attestation service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the attestation policy expressed as a string. */ Mono<Response<List<AttestationSigner>>> listPolicyManagementCertificatesWithResponse(AttestationTokenValidationOptions validationOptions, Context context) { final AttestationTokenValidationOptions optionsToUse = (validationOptions != null ? validationOptions : this.tokenValidationOptions); return this.certificatesImpl.getWithResponseAsync(context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> responseWithToken = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { responseWithToken.getValue().validate(signers, optionsToUse); JsonWebKeySet policyJwks = responseWithToken.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyCertificatesResult.class).getPolicyCertificates(); List<AttestationSigner> policySigners = AttestationSignerImpl.attestationSignersFromJwks(policyJwks); return Utilities.generateAttestationResponseFromModelType(responseWithToken, responseWithToken.getValue(), policySigners); }); }); } /** * Sets the current policy for an attestation type. * * @param options Options for this API call, encapsulating both the X.509 certificate to add to the set of policy * signing certificates and the signing key used to sign the request to the service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyCertificatesModificationResult>> addPolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options) { return withContext(context -> addPolicyManagementCertificateWithResponse(options, context)); } /** * Adds a new attestation policy certificate to the set of policy management certificates. 
* * @param options Options for this API call, encapsulating both the X.509 certificate to add to the set of policy * signing certificates and the signing key used to sign the request to the service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyCertificatesModificationResult> addPolicyManagementCertificate(PolicyManagementCertificateOptions options) { return addPolicyManagementCertificateWithResponse(options) .flatMap(FluxUtil::toMono); } /** * Adds a new policy management certificate to the set of policy management certificates. * * @param options Options for this API call, encapsulating both the X.509 certificate to add to the set of policy * signing certificates and the signing key used to sign the request to the service. * @param context Context for the operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ Mono<Response<PolicyCertificatesModificationResult>> addPolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options, Context context) { Objects.requireNonNull(options.getCertificate()); Objects.requireNonNull(options.getAttestationSigner()); final AttestationTokenValidationOptions finalOptions = this.tokenValidationOptions; String base64Certificate = null; try { base64Certificate = Base64.getEncoder().encodeToString(options.getCertificate().getEncoded()); } catch (CertificateEncodingException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } JsonWebKey jwk = new JsonWebKey(options.getCertificate().getType()) .setX5C(new ArrayList<String>()); jwk.getX5C().add(base64Certificate); AttestationCertificateManagementBody certificateBody = new AttestationCertificateManagementBody() .setPolicyCertificate(jwk); AttestationToken addToken = null; try { addToken = AttestationTokenImpl.createSecuredToken(SERIALIZER_ADAPTER.serialize(certificateBody, SerializerEncoding.JSON), options.getAttestationSigner()); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } return this.certificatesImpl.addWithResponseAsync(addToken.serialize(), context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyCertificatesModificationResult addResult = PolicyCertificatesModificationResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyCertificatesModificationResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), addResult); }); }); } /** * Removes a policy management certificate from the set of policy management certificates. 
* * @param options Options for this API call, encapsulating both the X.509 certificate to remove from the set of policy * signing certificates and the signing key used to sign the request to the service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PolicyCertificatesModificationResult>> removePolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options) { return withContext(context -> removePolicyManagementCertificateWithResponse(options, context)); } /** * Removes a policy management certificate from the set of policy management certificates. * * @param options Options for this API call, encapsulating both the X.509 certificate to remove from the set of policy * signing certificates and the signing key used to sign the request to the service. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PolicyCertificatesModificationResult> removePolicyManagementCertificate(PolicyManagementCertificateOptions options) { return removePolicyManagementCertificateWithResponse(options) .flatMap(FluxUtil::toMono); } /** * Removes a policy management certificate from the set of policy management certificates. * * @param options Options for this API call, encapsulating both the X.509 certificate to remove from the set of policy * signing certificates and the signing key used to sign the request to the service. * @param context Context for the operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the response to an attestation policy operation. 
*/ Mono<Response<PolicyCertificatesModificationResult>> removePolicyManagementCertificateWithResponse(PolicyManagementCertificateOptions options, Context context) { Objects.requireNonNull(options.getCertificate()); Objects.requireNonNull(options.getAttestationSigner()); final AttestationTokenValidationOptions finalOptions = this.tokenValidationOptions; String base64Certificate = null; try { base64Certificate = Base64.getEncoder().encodeToString(options.getCertificate().getEncoded()); } catch (CertificateEncodingException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } JsonWebKey jwk = new JsonWebKey(options.getCertificate().getType()) .setX5C(new ArrayList<String>()); jwk.getX5C().add(base64Certificate); AttestationCertificateManagementBody certificateBody = new AttestationCertificateManagementBody() .setPolicyCertificate(jwk); AttestationToken addToken = null; try { addToken = AttestationTokenImpl.createSecuredToken(SERIALIZER_ADAPTER.serialize(certificateBody, SerializerEncoding.JSON), options.getAttestationSigner()); } catch (IOException e) { throw logger.logExceptionAsError(new RuntimeException(e.getMessage())); } return this.certificatesImpl.removeWithResponseAsync(addToken.serialize(), context) .onErrorMap(Utilities::mapException) .flatMap(response -> { Response<AttestationTokenImpl> token = Utilities.generateResponseFromModelType(response, new AttestationTokenImpl(response.getValue().getToken())); return getCachedAttestationSigners() .map(signers -> { token.getValue().validate(signers, finalOptions); PolicyCertificatesModificationResult addResult = PolicyCertificatesModificationResultImpl.fromGenerated(token.getValue().getBody(com.azure.security.attestation.implementation.models.PolicyCertificatesModificationResult.class)); return Utilities.generateAttestationResponseFromModelType(response, token.getValue(), addResult); }); }); } /** * Return cached attestation signers, fetching from the internet if needed. *<p> * Validating an attestation JWT requires a set of attestation signers retrieved from the * attestation service using the `signingCertificatesImpl.getAsync()` API. This API can take * more than 100ms to complete, so caching the value locally can significantly reduce the time * needed to validate the attestation JWT. * </p><p> * Note that there is a possible race condition if two threads on the same client are making * calls to the attestation service. In that case, two calls to `signingCertificatesImpl.getAsync()` * may be made. That should not result in any problems - one of the two calls will complete first * and the `compareAndSet` will update the `cachedSigners`. The second call's result will be discarded * because the `compareAndSet` API won't capture a reference to the second `signers` object. * * </p> * @return cached signers. */ Mono<List<AttestationSigner>> getCachedAttestationSigners() { if (this.cachedSigners.get() != null) { return Mono.just(this.cachedSigners.get()); } else { return this.signingCertificatesImpl.getAsync() .map(AttestationSignerImpl::attestationSignersFromJwks) .map(signers -> { this.cachedSigners.compareAndSet(null, signers); return this.cachedSigners.get(); }); } } }
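The caching note in getCachedAttestationSigners above boils down to a lock-free "first fetch wins" pattern built on AtomicReference.compareAndSet: racing threads may each fetch, but only the first published result is kept and every later caller reuses it. A minimal, type-agnostic sketch of that pattern (class and method names here are illustrative, not part of the SDK):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Sketch of the compareAndSet caching pattern described above: the first successful
// fetch wins, results from racing fetches are discarded, and subsequent callers see
// the cached value without refetching.
final class OnceCache<T> {
    private final AtomicReference<T> cached = new AtomicReference<>(null);
    private final Supplier<T> fetcher;

    OnceCache(Supplier<T> fetcher) {
        this.fetcher = fetcher;
    }

    T get() {
        T existing = cached.get();
        if (existing != null) {
            return existing;
        }
        // Two threads may both fetch; only the first compareAndSet publishes its result.
        cached.compareAndSet(null, fetcher.get());
        return cached.get();
    }
}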
What if the request has STRONG or BOUNDED_STALENESS consistency?
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); return Mono.empty(); } else if (partitionKeyInternal != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); })); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
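The original method above only strips a caller-supplied session token for master operations, so a token set by the caller would ride along even when the request runs at STRONG or BOUNDED_STALENESS consistency. The flagged line (next) computes whether the effective level is SESSION, and the revised method after it also removes the header when that check fails. A standalone sketch of that guard follows; the header constant, enum, and class name are assumptions for illustration, not the Cosmos SDK's API:

import java.util.Map;

// Hedged sketch of the guard discussed in the review comment above: a session token is
// only meaningful when the request runs at SESSION consistency, so for STRONG,
// BOUNDED_STALENESS, or any other effective level the header is dropped before sending.
final class SessionTokenGuard {
    // Assumed header name for illustration; the SDK uses its own HttpConstants value.
    private static final String SESSION_TOKEN_HEADER = "x-ms-session-token";

    enum ConsistencyLevel { STRONG, BOUNDED_STALENESS, SESSION, CONSISTENT_PREFIX, EVENTUAL }

    static void stripSessionTokenIfNotSession(Map<String, String> headers, ConsistencyLevel effectiveLevel) {
        if (effectiveLevel != ConsistencyLevel.SESSION) {
            headers.remove(SESSION_TOKEN_HEADER);
        }
    }
}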
boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader,
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, 
sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private 
Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String 
path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if 
(request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.applySessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if 
(Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { 
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
This might not work because setPartitionLocalSessionToken relies on the request context, which might point to fields that are null. See my other PR; I think it is fine to call resolveGlobalSessionToken with a twist: check for the PARTITION_KEY_RANGE_ID header and only return the matching entry from the internal map.
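A hypothetical sketch of the suggestion above: resolve the session token globally, then keep only the entry matching the PARTITION_KEY_RANGE_ID header. The helper name and the parsing assumption (a comma-separated list of "rangeId:token" pairs) are illustrations, not the actual SessionContainer API.

import java.util.Arrays;
import java.util.Optional;

final class PartitionScopedSessionToken {
    // Hypothetical helper: filter a globally resolved session token down to the
    // entry whose partition key range id matches the PARTITION_KEY_RANGE_ID header.
    // Assumes the composite token is a comma-separated list of "rangeId:token" parts.
    static Optional<String> filterByRangeId(String globalSessionToken, String partitionKeyRangeId) {
        if (globalSessionToken == null || globalSessionToken.isEmpty()
            || partitionKeyRangeId == null || partitionKeyRangeId.isEmpty()) {
            return Optional.empty();
        }
        return Arrays.stream(globalSessionToken.split(","))
            .filter(part -> part.startsWith(partitionKeyRangeId + ":"))
            .findFirst();
    }
}

The caller would then set the returned entry (if present) as the SESSION_TOKEN header, and otherwise fall back to the full global token, avoiding the null request-context fields that setPartitionLocalSessionToken depends on.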
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); return Mono.empty(); } else if (partitionKeyInternal != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); })); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer);
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, 
sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private 
Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String 
path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if 
(request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.applySessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if 
(Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { 
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
This may short-circuit too eagerly and not remove any dangling JSON nodes in the tree that need to be removed. The following is an example of a case we should discuss how to handle:

```java
@JsonProperty("a.flattened.property")
@JsonFlatten
private String theProperty;
```

If we have the following JSON:

```json
{
  "a": {
    "flattened": { }
  }
}
```

Right now, that should trigger this code path and "a.flattened.property" will be correctly identified as null. But should this also remove the sub-node "flattened", since it is empty and was expected to contain a value? If there are other JSON configurations such as `JsonAnySetter` or an additional-properties map, this will likely insert a key-value pair of `flattened : null`. @srnagar do you have any thoughts on how this should be handled?
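To make the "dangling node" question concrete, here is a minimal, standalone Jackson sketch of what pruning the now-empty intermediate nodes could look like. This is not the azure-core `FlatteningDeserializer`; the class and method names are hypothetical, and it only illustrates one possible cleanup strategy applied to the example JSON above.

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class PruneEmptyNodesSketch {

    // Removes intermediate object nodes along the flattened path that are left
    // empty once the leaf value has been extracted (or found to be missing).
    static void pruneEmptyPath(ObjectNode root, String[] pathKeys) {
        pruneRecursive(root, pathKeys, 0);
    }

    // Returns true when the given node ends up empty and should be removed by its parent.
    private static boolean pruneRecursive(ObjectNode node, String[] keys, int index) {
        if (index < keys.length) {
            JsonNode child = node.get(keys[index]);
            if (child instanceof ObjectNode && pruneRecursive((ObjectNode) child, keys, index + 1)) {
                node.remove(keys[index]); // drop the empty intermediate node
            }
        }
        return node.size() == 0;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode root = (ObjectNode) mapper.readTree("{\"a\":{\"flattened\":{}}}");
        pruneEmptyPath(root, new String[] {"a", "flattened", "property"});
        System.out.println(root); // prints {} - both "flattened" and "a" were pruned
    }
}
```

Whether pruning like this is desirable interacts with the `JsonAnySetter`/additional-properties behavior noted above, since removing nodes changes which keys an any-setter would observe.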
private void handleFlatteningForField(AnnotatedField annotatedField, JsonNode jsonNode) { final JsonProperty jsonProperty = annotatedField.getAnnotation(JsonProperty.class); if (jsonProperty != null) { final String jsonPropValue = jsonProperty.value(); if (jsonNode.has(jsonPropValue)) { final String escapedJsonPropValue = jsonPropValue.replace(".", "\\."); ((ObjectNode) jsonNode).set(escapedJsonPropValue, jsonNode.get(jsonPropValue)); } if ((classHasJsonFlatten || annotatedField.hasAnnotation(JsonFlatten.class)) && IS_FLATTENED_PATTERN.matcher(jsonPropValue).matches()) { String[] jsonNodeKeys = Arrays.stream(SPLIT_KEY_PATTERN.split(jsonPropValue)) .map(FlatteningDeserializer::unescapeEscapedDots) .toArray(String[]::new); int depth = 0; List<JsonNode> nodePath = new ArrayList<>(); nodePath.add(jsonNode); depth++; JsonNode nodeToAdd = jsonNode; for (String jsonNodeKey : jsonNodeKeys) { nodeToAdd = nodeToAdd.get(jsonNodeKey); depth++; if (nodeToAdd == null) { break; } nodePath.add(nodeToAdd); } if (nodePath.size() == depth - 1) { ((ObjectNode) jsonNode).set(jsonPropValue, null); return; } if (!nodePath.get(nodePath.size() - 2).has(jsonNodeKeys[jsonNodeKeys.length - 1])) { ((ObjectNode) jsonNode).set(jsonPropValue, null); } else { ((ObjectNode) jsonNode).set(jsonPropValue, nodePath.get(nodePath.size() - 1)); } for (int i = nodePath.size() - 2; i >= 0; i--) { if (i == nodePath.size() - 2 && nodePath.size() - 1 != jsonNodeKeys.length && nodePath.get(i).get(jsonNodeKeys[i]).size() != 0) { break; } ((ObjectNode) nodePath.get(i)).remove(jsonNodeKeys[i]); if (nodePath.get(i).size() > 0) { break; } } } } }
if (nodePath.size() == depth - 1) {
private void handleFlatteningForField(AnnotatedField annotatedField, JsonNode jsonNode) { final JsonProperty jsonProperty = annotatedField.getAnnotation(JsonProperty.class); if (jsonProperty != null) { final String jsonPropValue = jsonProperty.value(); if (jsonNode.has(jsonPropValue)) { final String escapedJsonPropValue = jsonPropValue.replace(".", "\\."); ((ObjectNode) jsonNode).set(escapedJsonPropValue, jsonNode.get(jsonPropValue)); } if ((classHasJsonFlatten || annotatedField.hasAnnotation(JsonFlatten.class)) && IS_FLATTENED_PATTERN.matcher(jsonPropValue).matches()) { String[] jsonNodeKeys = Arrays.stream(SPLIT_KEY_PATTERN.split(jsonPropValue)) .map(FlatteningDeserializer::unescapeEscapedDots) .toArray(String[]::new); int depth = 0; List<JsonNode> nodePath = new ArrayList<>(); nodePath.add(jsonNode); depth++; JsonNode nodeToAdd = jsonNode; for (String jsonNodeKey : jsonNodeKeys) { nodeToAdd = nodeToAdd.get(jsonNodeKey); depth++; if (nodeToAdd == null) { break; } nodePath.add(nodeToAdd); } if (nodePath.size() == depth - 1) { ((ObjectNode) jsonNode).set(jsonPropValue, null); return; } if (!nodePath.get(nodePath.size() - 2).has(jsonNodeKeys[jsonNodeKeys.length - 1])) { ((ObjectNode) jsonNode).set(jsonPropValue, null); } else { ((ObjectNode) jsonNode).set(jsonPropValue, nodePath.get(nodePath.size() - 1)); } for (int i = nodePath.size() - 2; i >= 0; i--) { if (i == nodePath.size() - 2 && nodePath.size() - 1 != jsonNodeKeys.length && nodePath.get(i).get(jsonNodeKeys[i]).size() != 0) { break; } ((ObjectNode) nodePath.get(i)).remove(jsonNodeKeys[i]); if (nodePath.get(i).size() > 0) { break; } } } } }
class that field belongs to */
class that field belongs to */
We are adding a couple of extra `flatMap` operations in here, which could potentially impact the latency of the call. Is there any way to avoid these extra calls?
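For context on the latency concern: when the caches are warm, the resolve Monos typically complete synchronously, so the extra `flatMap` stages mostly add operator-assembly overhead rather than an additional network hop. The usual mitigation is to keep a purely synchronous fast path and only compose the async chain on a miss. Below is a minimal, hypothetical Reactor sketch; the names and the header constant are illustrative, not the SDK's actual code.

```java
import java.util.Map;

import reactor.core.publisher.Mono;

public class FastPathSketch {

    // Hypothetical stand-in for the collection / partition-key-range cache results;
    // in the SDK these usually resolve from an in-memory cache.
    private volatile String cachedSessionToken;

    private Mono<String> resolveTokenAsync() {
        // Placeholder for the cache-miss path that genuinely needs async work.
        return Mono.fromSupplier(() -> "resolved-session-token");
    }

    Mono<Void> applyToken(Map<String, String> headers) {
        // Synchronous fast path: when nothing async is required, return immediately
        // instead of composing extra flatMap stages on every call.
        if (cachedSessionToken != null) {
            headers.put("x-ms-session-token", cachedSessionToken);
            return Mono.empty();
        }
        // Slow path: only here do we pay for the extra operators.
        return resolveTokenAsync()
            .doOnNext(token -> {
                cachedSessionToken = token;
                headers.put("x-ms-session-token", token);
            })
            .then();
    }
}
```

This mirrors the early `return Mono.empty()` branches already present in `applySessionToken`, which keep the extra stages off the common path.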
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); return Mono.empty(); } else if (partitionKeyInternal != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); })); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, 
sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private 
Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String 
path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if 
(request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.applySessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if 
(Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { 
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
These seem to be required for GatewayProxyModel? In that case we should be passing them in the constructor. Why are we using setters?
private void updateGatewayProxy() { ((RxGatewayStoreModel)this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); ((RxGatewayStoreModel)this.gatewayProxy).setCollectionCache(this.collectionCache); ((RxGatewayStoreModel)this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); ((RxGatewayStoreModel)this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); }
((RxGatewayStoreModel)this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
private void updateGatewayProxy() { ((RxGatewayStoreModel)this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); ((RxGatewayStoreModel)this.gatewayProxy).setCollectionCache(this.collectionCache); ((RxGatewayStoreModel)this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); ((RxGatewayStoreModel)this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); }
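For context on why plain constructor injection may not be a drop-in answer to the review comment: in init() (see the RxDocumentClientImpl context below), the collection cache and the partition-key-range cache are themselves constructed with the already-created gateway proxy, so the proxy cannot receive fully built caches through its own constructor without a cycle; the setters invoked in updateGatewayProxy() are what break that cycle after everything exists. The following is a minimal, self-contained sketch of that wiring order under simplified assumptions. GatewayProxy, CollectionCache, and the Supplier-based constructor are illustrative stand-ins, not the actual azure-cosmos types or their real signatures.

// Hypothetical, simplified illustration of the ordering problem behind the setters
// in updateGatewayProxy(); the class names here are stand-ins for the real types.
import java.util.function.Supplier;

public class InjectionSketch {

    // The cache needs the proxy to issue its metadata requests.
    static final class CollectionCache {
        private final GatewayProxy proxy;
        CollectionCache(GatewayProxy proxy) { this.proxy = proxy; }
        String resolve(String link) { return proxy.call("resolve " + link); }
    }

    // The proxy, in turn, needs the cache on some request paths, which is the cycle
    // that makes pure constructor injection awkward.
    static final class GatewayProxy {
        private CollectionCache collectionCache;                 // setter injection (current approach)
        private final Supplier<CollectionCache> cacheSupplier;   // alternative: lazy supplier passed to the constructor

        GatewayProxy(Supplier<CollectionCache> cacheSupplier) {
            this.cacheSupplier = cacheSupplier;
        }

        void setCollectionCache(CollectionCache cache) { this.collectionCache = cache; }

        String call(String operation) {
            // Prefer the eagerly injected cache; fall back to the lazy supplier otherwise.
            CollectionCache cache = collectionCache != null ? collectionCache : cacheSupplier.get();
            return "executed '" + operation + "' with cache=" + cache;
        }
    }

    public static void main(String[] args) {
        // Wiring order mirrors RxDocumentClientImpl.init(): proxy first, then the caches
        // that depend on it, then the back-references are filled in via setters.
        final CollectionCache[] cacheHolder = new CollectionCache[1];
        GatewayProxy proxy = new GatewayProxy(() -> cacheHolder[0]);
        cacheHolder[0] = new CollectionCache(proxy);
        proxy.setCollectionCache(cacheHolder[0]); // analogous to what updateGatewayProxy() does today

        System.out.println(cacheHolder[0].resolve("/dbs/db1/colls/c1"));
    }
}

A Supplier (or a small holder object) passed into the constructor would make the dependency explicit at construction time while still deferring resolution, but it trades the mutable-setter surface for a lambda that captures not-yet-initialized state; whether that is an improvement here is a design judgment, not something the surrounding code settles.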
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private ApiType apiType; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, 
StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.getAndDecrement(); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); 
hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE)); this.apiType = apiType; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { return BridgeInternal.createCosmosDiagnostics(this); } private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. 
More info: https: } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(), ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.reactorHttpClient, connectionPolicy.isClientTelemetryEnabled(), this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init(); this.queryPlanCache = new ConcurrentHashMap<>(); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled ); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer 
userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel( this, sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. 
id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId), invalidPartitionExceptionRetryPolicy); } private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal( 
String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId) { Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) { queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; return iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, CosmosQueryRequestOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, 
String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if(options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } 
if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null && options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (objectDoc instanceof ObjectNode) { internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc); } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = 
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } public static PartitionKeyInternal extractPartitionKeyValueFromDocument( InternalObjectNode document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { switch (partitionKeyDefinition.getKind()) { case HASH: String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts); if (value == null || value.getClass() == ObjectNode.class) { value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } break; case MULTI_HASH: Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()]; for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){ String partitionPath = partitionKeyDefinition.getPaths().get(pathIter); List<String> partitionPathParts = PathParser.getPathParts(partitionPath); partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts); } return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false); default: throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind()); } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server 
request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError())); request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size()); return request; } private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if (this.apiType != null) { request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString()); } if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (RequestVerb.PATCH.equals(httpMethod) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics); if (this.requiresFeedRangeFiltering(request)) { return request.getFeedRange() .populateFeedRangeFilteringHeaders( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)) .flatMap(this::populateAuthorizationHeader); } return this.populateAuthorizationHeader(request); } private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) { if (request.getResourceType() != ResourceType.Document && request.getResourceType() != ResourceType.Conflict) { return false; } switch (request.getOperationType()) { case ReadFeed: case Query: case SqlQuery: return request.getFeedRange() != null; default: return false; } } @Override public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) { if (request == null) { throw new IllegalArgumentException("request"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return request; }); } else { return Mono.just(request); } } @Override public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) { if (httpHeaders == 
null) { throw new IllegalArgumentException("httpHeaders"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return httpHeaders; }); } return Mono.just(httpHeaders); } @Override public AuthorizationTokenType getAuthorizationTokenType() { return this.authorizationTokenType; } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, RequestVerb requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.cosmosAuthorizationTokenResolver != null) { return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? Collections.unmodifiableMap(properties) : null); } else if (credential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString()); if (cosmosResourceType == null) { return CosmosResourceType.SYSTEM; } return cosmosResourceType; } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest 
request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { /* chain populateHeaders like the other verbs so asynchronously populated headers (e.g. AAD authorization, feed-range filtering) are applied before dispatch */ return populateHeaders(request, RequestVerb.PATCH) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } @Override public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance); } private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert); Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs.flatMap(req -> patch(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs); return requestObs.flatMap(req -> this .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> { return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, CosmosQueryRequestOptions options, Class<T> klass) { String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } itemIdentityList .forEach(itemIdentity -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap; rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); return createReadManyQuery( resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), options, Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); double requestCharge = 0; for (FeedResponse<Document> page : feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponse(finalList, headers); return frp; }); }); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } return rangeQueryMap; } private 
SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame( List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); if (!Objects.equals(idValue, pkValue)) { continue; } parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]") .collect(Collectors.joining()); } private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum); return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { if (operationContextAndListenerTuple == null) { return RxDocumentClientImpl.this.query(request).single(); } else { final OperationListener listener = operationContextAndListenerTuple.getOperationListener(); final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext(); request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId()); listener.requestListener(operationContext, request); return RxDocumentClientImpl.this.query(request).single().doOnNext( response -> listener.responseListener(operationContext, response) ).doOnError( ex -> listener.exceptionListener(operationContext, ex) ); } } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, Document.class, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<Document>> readAllDocuments( String collectionLink, PartitionKey partitionKey, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey == null) { throw new 
IllegalArgumentException("partitionKey"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); String pkSelector = createPkSelector(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); final CosmosQueryRequestOptions effectiveOptions = ModelBridgeInternal.createQueryRequestOptions(options); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( resourceLink, querySpec, ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()), Document.class, ResourceType.Document, queryClient, activityId); }); }, invalidPartitionExceptionRetryPolicy); }); } @Override public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, List<Object> procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } @Override public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); } private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options)) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } } @Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String 
collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. 
conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. 
clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (clientEncryptionKey 
== null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, CosmosQueryRequestOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new CosmosQueryRequestOptions(); } Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int maxPageSize = maxItemCount != null ? maxItemCount : -1; final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } @Override public DatabaseAccount getLatestDatabaseAccount() { return this.globalEndpointManager.getLatestDatabaseAccount(); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public 
void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", this.clientId); if (!closed.getAndSet(true)) { logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); 
LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); this.storeModel.enableThroughputControl(throughputControlStore); } this.throughputControlStore.enableThroughputControlGroup(group); } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null); return valueHolderMono.map(RxDocumentClientImpl::toFeedRanges); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { throw new IllegalStateException("PartitionKeyRange list cannot be null"); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } }
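The createLogicalPartitionScanQuerySpec helper above assembles a parameterized single-partition scan query instead of concatenating the partition key value directly into the SQL text. Below is a minimal, self-contained sketch of that same pattern using the public com.azure.cosmos.models types; the class and method names (LogicalPartitionScanExample, buildScanSpec), the sample key value, and the ["pk"] selector format are illustrative assumptions, not part of the SDK code shown above.

import com.azure.cosmos.models.SqlParameter;
import com.azure.cosmos.models.SqlQuerySpec;

import java.util.ArrayList;
import java.util.List;

// Sketch only: mirrors the query-construction pattern of createLogicalPartitionScanQuerySpec.
public final class LogicalPartitionScanExample {

    // partitionKeySelector is assumed to be a path selector such as ["pk"] (hypothetical example value).
    static SqlQuerySpec buildScanSpec(Object partitionKeyValue, String partitionKeySelector) {
        String pkParamName = "@pkValue";
        List<SqlParameter> parameters = new ArrayList<>();
        parameters.add(new SqlParameter(pkParamName, partitionKeyValue));

        // Yields e.g.: SELECT * FROM c WHERE c["pk"] = @pkValue
        String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
        return new SqlQuerySpec(queryText, parameters);
    }

    public static void main(String[] args) {
        SqlQuerySpec spec = buildScanSpec("tenant-42", "[\"pk\"]");
        System.out.println(spec.getQueryText());          // SELECT * FROM c WHERE c["pk"] = @pkValue
        System.out.println(spec.getParameters().size());  // 1
    }
}

Passing the value through a SqlParameter keeps the query text stable across different partition key values and avoids building SQL from untrusted values by string concatenation.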
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private ApiType apiType; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, 
StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType) { activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.getAndDecrement(); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); 
hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = Collections.synchronizedMap(new SizeLimitingLRUCache(Constants.QUERYPLAN_CACHE_SIZE)); this.apiType = apiType; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { return BridgeInternal.createCosmosDiagnostics(this, this.globalEndpointManager); } private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. 
More info: https: } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(), ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.reactorHttpClient, connectionPolicy.isClientTelemetryEnabled(), this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init(); this.queryPlanCache = new ConcurrentHashMap<>(); this.retryPolicy.setRxCollectionCache(this.collectionCache); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled ); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel 
consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. 
id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(options)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId), invalidPartitionExceptionRetryPolicy); } private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal( 
String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId) { Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) { queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; return iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, CosmosQueryRequestOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf('#') != -1) { throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String,
String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if(options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } 
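// Illustrative, hypothetical caller (setter name assumed) showing how an option surfaces as a wire header:
//   RequestOptions opts = new RequestOptions();
//   opts.setScriptLoggingEnabled(true);
//   Map<String, String> hdrs = getRequestHeaders(opts, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
//   // hdrs would then carry HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING -> "true"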
if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null && options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (objectDoc instanceof ObjectNode) { internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc); } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = 
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } public static PartitionKeyInternal extractPartitionKeyValueFromDocument( InternalObjectNode document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { switch (partitionKeyDefinition.getKind()) { case HASH: String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts); if (value == null || value.getClass() == ObjectNode.class) { value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } break; case MULTI_HASH: Object[] partitionKeyValues = new Object[partitionKeyDefinition.getPaths().size()]; for(int pathIter = 0 ; pathIter < partitionKeyDefinition.getPaths().size(); pathIter++){ String partitionPath = partitionKeyDefinition.getPaths().get(pathIter); List<String> partitionPathParts = PathParser.getPathParts(partitionPath); partitionKeyValues[pathIter] = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, partitionPathParts); } return PartitionKeyInternal.fromObjectArray(partitionKeyValues, false); default: throw new IllegalArgumentException("Unrecognized Partition kind: " + partitionKeyDefinition.getKind()); } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
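// Attach the item-serialization timing captured above to the request's CosmosDiagnostics.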
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server 
request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError())); request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size()); return request; } private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if (this.apiType != null) { request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString()); } if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (RequestVerb.PATCH.equals(httpMethod) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics); if (this.requiresFeedRangeFiltering(request)) { return request.getFeedRange() .populateFeedRangeFilteringHeaders( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)) .flatMap(this::populateAuthorizationHeader); } return this.populateAuthorizationHeader(request); } private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) { if (request.getResourceType() != ResourceType.Document && request.getResourceType() != ResourceType.Conflict) { return false; } switch (request.getOperationType()) { case ReadFeed: case Query: case SqlQuery: return request.getFeedRange() != null; default: return false; } } @Override public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) { if (request == null) { throw new IllegalArgumentException("request"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return request; }); } else { return Mono.just(request); } } @Override public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) { if (httpHeaders == 
null) { throw new IllegalArgumentException("httpHeaders"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return httpHeaders; }); } return Mono.just(httpHeaders); } @Override public AuthorizationTokenType getAuthorizationTokenType() { return this.authorizationTokenType; } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, RequestVerb requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.cosmosAuthorizationTokenResolver != null) { return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? Collections.unmodifiableMap(properties) : null); } else if (credential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString()); if (cosmosResourceType == null) { return CosmosResourceType.SYSTEM; } return cosmosResourceType; } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest 
request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, RequestVerb.PATCH); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(request).processMessage(request); } @Override public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance); } private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert); Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs.flatMap(req -> patch(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs); return requestObs.flatMap(req -> this .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> { return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, CosmosQueryRequestOptions options, Class<T> klass) { String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } itemIdentityList .forEach(itemIdentity -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap; rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); return createReadManyQuery( resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), options, Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); double requestCharge = 0; for (FeedResponse<Document> page : feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponse(finalList, headers); return frp; }); }); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } return rangeQueryMap; } private 
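// Special case used when the partition key selector is ["id"] (partition key value equals the
// document id): the read-many filter collapses to SELECT * FROM c WHERE c.id IN ( ... ),
// parameterized with one @param per id; identities whose id and partition key values differ
// are skipped.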
SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame( List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); if (!Objects.equals(idValue, pkValue)) { continue; } parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]") .collect(Collectors.joining()); } private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum); return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { if (operationContextAndListenerTuple == null) { return RxDocumentClientImpl.this.query(request).single(); } else { final OperationListener listener = operationContextAndListenerTuple.getOperationListener(); final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext(); request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId()); listener.requestListener(operationContext, request); return RxDocumentClientImpl.this.query(request).single().doOnNext( response -> listener.responseListener(operationContext, response) ).doOnError( ex -> listener.exceptionListener(operationContext, ex) ); } } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, Document.class, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<Document>> readAllDocuments( String collectionLink, PartitionKey partitionKey, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey == null) { throw new 
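// partitionKey is required: readAllDocuments below performs a logical-partition scan, i.e. it
// builds a query filtered on the collection's partition key selector and routes it to the single
// PartitionKeyRange that owns the supplied partition key value.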
IllegalArgumentException("partitionKey"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); String pkSelector = createPkSelector(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); final CosmosQueryRequestOptions effectiveOptions = ModelBridgeInternal.createQueryRequestOptions(options); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions)); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( resourceLink, querySpec, ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()), Document.class, ResourceType.Document, queryClient, activityId); }); }, invalidPartitionExceptionRetryPolicy); }); } @Override public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, List<Object> procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } @Override public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); } private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options)) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } } @Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String 
collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. 
conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. 
clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (clientEncryptionKey 
== null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryClientEncryptionKeys(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, CosmosQueryRequestOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new CosmosQueryRequestOptions(); } Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int maxPageSize = maxItemCount != null ? maxItemCount : -1; final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options; DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), retryPolicy); return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } @Override public DatabaseAccount getLatestDatabaseAccount() { return this.globalEndpointManager.getLatestDatabaseAccount(); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), 
e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", this.clientId); if (!closed.getAndSet(true)) { logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); 
LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); this.storeModel.enableThroughputControl(throughputControlStore); } this.throughputControlStore.enableThroughputControlGroup(group); } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) { logger.debug("getFeedRange collectionLink=[{}]", collectionLink); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( 
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } }
What happens if a feedRange is configured on the query, i.e., via `QueryFeedOptions.setFeedRange()`? If a FeedRange is set, is it guaranteed that this code still works? Could you please add a test for this scenario?
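Since the comment above asks what a test for the feed-range scenario might look like, here is a minimal, hedged sketch (not part of the original review data). It assumes a hypothetical test fixture: an already-created `CosmosAsyncContainer` named `container` using SESSION consistency with a few documents spread across partitions, and it only relies on public azure-cosmos v4 APIs (`getFeedRanges`, `CosmosQueryRequestOptions.setFeedRange`, `queryItems`). A stricter variant could additionally inspect the request diagnostics for the session token header.

```java
import com.azure.cosmos.CosmosAsyncContainer;
import com.azure.cosmos.models.CosmosQueryRequestOptions;
import com.azure.cosmos.models.FeedRange;
import com.azure.cosmos.models.FeedResponse;
import com.fasterxml.jackson.databind.node.ObjectNode;

import java.util.List;

public class FeedRangeQuerySessionTest {

    // Assumed to be initialized elsewhere with SESSION consistency (hypothetical fixture).
    private CosmosAsyncContainer container;

    public void queryWithExplicitFeedRangeStillResolvesSessionToken() {
        // Enumerate the physical feed ranges of the container.
        List<FeedRange> feedRanges = container.getFeedRanges().block();

        for (FeedRange feedRange : feedRanges) {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setFeedRange(feedRange); // scope the query to a single feed range

            // Drain all pages of the feed-range-scoped query.
            List<ObjectNode> results = container
                .queryItems("SELECT * FROM c", options, ObjectNode.class)
                .byPage()
                .flatMapIterable(FeedResponse::getResults)
                .collectList()
                .block();

            // The query should complete without a READ_SESSION_NOT_AVAILABLE error and
            // return a (possibly empty) result list for the targeted feed range.
            if (results == null) {
                throw new AssertionError("Expected results for feed range " + feedRange);
            }
        }
    }
}
```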
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); return Mono.empty(); } else if (partitionKeyInternal != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); })); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, 
sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private 
Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String 
path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if 
(request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.applySessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if 
(Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { 
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
NPE check: how can we be sure `collectionRoutingMapValueHolder.v` is not null? Consider the collection delete scenario, or a collection being deleted and recreated with the same name.
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); return Mono.empty(); } else if (partitionKeyInternal != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); })); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, 
sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
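The revised method above guards every cache lookup before dereferencing the holder and falls back to the global session token when the routing information is missing, which addresses the reviewer's NPE question. Below is a minimal, self-contained sketch of that defensive pattern; `ValueHolder`, `RoutingMapCache`, and `SessionTokenResolver` are hypothetical stand-ins for the SDK types, and the literal session-token header name is an assumption.

```java
import java.util.Map;
import java.util.concurrent.CompletableFuture;

// Hypothetical stand-in for the SDK's value-holder type; only the shape of the null check matters here.
final class ValueHolder<T> {
    final T v;
    ValueHolder(T v) { this.v = v; }
}

final class SessionTokenResolver {

    interface RoutingMapCache {
        // May complete with a null holder, or a holder whose value is null, e.g. after the
        // collection was deleted (or deleted and recreated under the same name).
        CompletableFuture<ValueHolder<Object>> tryLookup(String collectionRid);
    }

    private final RoutingMapCache routingMapCache;
    private final Map<String, String> headers;
    private final String globalSessionToken;

    SessionTokenResolver(RoutingMapCache cache, Map<String, String> headers, String globalToken) {
        this.routingMapCache = cache;
        this.headers = headers;
        this.globalSessionToken = globalToken;
    }

    CompletableFuture<Void> applySessionToken(String collectionRid) {
        return routingMapCache.tryLookup(collectionRid).thenAccept(holder -> {
            if (holder == null || holder.v == null) {
                // Stale or missing routing information: fall back to the global session token
                // instead of dereferencing the empty holder (which would throw an NPE).
                if (globalSessionToken != null && !globalSessionToken.isEmpty()) {
                    headers.put("x-ms-session-token", globalSessionToken); // header name assumed
                }
                return;
            }
            // Otherwise the partition-scoped session token can be resolved from holder.v.
        });
    }
}
```

The point of the sketch is the early-return branch: every path either resolves a token or degrades to the global token, so no path dereferences a possibly-null cache result.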
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private 
Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String 
path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if 
(request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.applySessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if 
(Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { 
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
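The `captureSessionTokenAndHandlePartitionSplit` method in the class above refreshes the routing-map cache whenever the partition key range id returned by the service no longer matches the range resolved for the request. A simplified sketch of that split-detection check follows; it uses a hypothetical `RoutingCache` interface rather than the SDK's `RxPartitionKeyRangeCache`, and the header constant's literal value is assumed.

```java
import java.util.Map;
import java.util.concurrent.CompletableFuture;

final class PartitionSplitDetector {

    // Hypothetical refresh hook; the SDK calls partitionKeyRangeCache.refreshAsync(...) at this point.
    interface RoutingCache {
        CompletableFuture<Void> refresh(String collectionRid);
    }

    // Assumed literal value of HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID.
    private static final String PARTITION_KEY_RANGE_ID = "x-ms-documentdb-partitionkeyrangeid";

    private final RoutingCache routingCache;

    PartitionSplitDetector(RoutingCache routingCache) {
        this.routingCache = routingCache;
    }

    CompletableFuture<Void> handleResponse(String resolvedRangeId,
                                           String resolvedCollectionRid,
                                           Map<String, String> responseHeaders) {
        String returnedRangeId = responseHeaders.get(PARTITION_KEY_RANGE_ID);
        boolean splitSuspected = resolvedRangeId != null
            && resolvedCollectionRid != null && !resolvedCollectionRid.isEmpty()
            && returnedRangeId != null && !returnedRangeId.isEmpty()
            && !returnedRangeId.equals(resolvedRangeId);
        if (splitSuspected) {
            // The service answered from a different physical partition than the one resolved
            // locally, so the cached routing map is stale and should be refreshed.
            return routingCache.refresh(resolvedCollectionRid);
        }
        return CompletableFuture.completedFuture(null);
    }
}
```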
NPE check: how can we be sure `collectionValueHolder.v` is not null? Consider the collection delete scenario, or a collection being deleted and recreated with the same name.
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); return Mono.empty(); } else if (partitionKeyInternal != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); })); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
collectionValueHolder.v.getPartitionKey());
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, 
sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
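The question above concerns the null guard on `collectionValueHolder` / `collectionValueHolder.v` (for example after a collection delete, or a delete followed by a recreate with the same name). Below is a minimal, hypothetical sketch of the fallback pattern used in the updated method: if the collection cannot be resolved, fall back to the global session token instead of dereferencing the holder. The types and header name here are simplified stand-ins, not the SDK's `Utils.ValueHolder`, `ISessionContainer`, or header constants.

```java
import java.util.HashMap;
import java.util.Map;

public class SessionTokenGuardSketch {

    // Simplified stand-in for the SDK's value-holder wrapper.
    static final class ValueHolder<T> {
        final T v;
        ValueHolder(T v) { this.v = v; }
    }

    // Assumed header name for illustration only.
    static final String SESSION_TOKEN_HEADER = "x-ms-session-token";

    // Returns the headers that would be sent. The collection may be absent
    // (e.g. deleted, or deleted and recreated), so guard before dereferencing.
    static Map<String, String> applySessionToken(ValueHolder<String> collectionHolder,
                                                 String partitionLocalToken,
                                                 String globalToken) {
        Map<String, String> headers = new HashMap<>();
        if (collectionHolder == null || collectionHolder.v == null) {
            // Fallback path: no resolved collection, use the global session token if any.
            if (globalToken != null && !globalToken.isEmpty()) {
                headers.put(SESSION_TOKEN_HEADER, globalToken);
            }
            return headers;
        }
        // Normal path: a collection was resolved, use the partition-local token.
        headers.put(SESSION_TOKEN_HEADER, partitionLocalToken);
        return headers;
    }

    public static void main(String[] args) {
        // Fallback: holder is null, global token is used.
        System.out.println(applySessionToken(null, "pk:1#42", "global#7"));
        // Normal: collection resolved, partition-local token is used.
        System.out.println(applySessionToken(new ValueHolder<>("collRid"), "pk:1#42", "global#7"));
    }
}
```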
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private 
Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String 
path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if 
(request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.applySessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if 
(Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { 
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
It seems we are no longer checking whether the request consistency level is Eventual. Are we now sending the session token even when a consistency level such as Eventual is explicitly set on the request? (See the sketch after the updated method below.)
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); }
return applySessionToken(request).then(addIntendedCollectionRid(request));
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); }
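The question above is about whether an explicit per-request consistency level (e.g. Eventual) still suppresses the session token. Below is a minimal sketch of the gate used in `applySessionToken`, assuming the effective level passed in already reflects any per-request override of the account/default level; the enum and method names are simplified stand-ins, not the SDK's API.

```java
public class SessionConsistencyGateSketch {

    enum ConsistencyLevel { STRONG, BOUNDED_STALENESS, SESSION, CONSISTENT_PREFIX, EVENTUAL }

    // Mirrors the shape of the gate: attach a token only when the effective
    // consistency is SESSION; reads and batch requests always qualify, other
    // writes only when multiple write locations are in use.
    static boolean shouldAttachSessionToken(ConsistencyLevel effectiveLevel,
                                            boolean isReadOnlyRequest,
                                            boolean isBatchRequest,
                                            boolean useMultipleWriteLocations) {
        boolean sessionConsistency = effectiveLevel == ConsistencyLevel.SESSION;
        if (!sessionConsistency) {
            // An explicitly weaker (e.g. EVENTUAL) or stronger level: no session token.
            return false;
        }
        return isReadOnlyRequest || isBatchRequest || useMultipleWriteLocations;
    }

    public static void main(String[] args) {
        // Explicit EVENTUAL on the request: no token, which is the concern raised above.
        System.out.println(shouldAttachSessionToken(ConsistencyLevel.EVENTUAL, true, false, false));  // false
        // SESSION read: token attached.
        System.out.println(shouldAttachSessionToken(ConsistencyLevel.SESSION, true, false, false));   // true
        // SESSION write without multi-write: no token.
        System.out.println(shouldAttachSessionToken(ConsistencyLevel.SESSION, false, false, false));  // false
    }
}
```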
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> 
deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = 
this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, 
HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == 
HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && 
request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { 
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). 
flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
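Editor's note (not part of the original row): the applySessionToken bodies above reduce to a small gating decision before any token is attached: master/metadata operations never carry a session token, and data-plane requests only carry one under Session consistency when they are reads, batch requests, or writes against a multi-write-location account. A minimal sketch of that gate, assuming simplified boolean flags standing in for the SDK's ResourceType/OperationType/ConsistencyLevel checks:

// Illustrative only: the gating decision applySessionToken makes before attaching
// a session token. The flags below are simplified stand-ins, not the SDK's API.
public class SessionTokenGateSketch {

    static boolean shouldApplySessionToken(boolean isMasterOperation,
                                           boolean sessionConsistency,
                                           boolean isReadOnly,
                                           boolean isBatch,
                                           boolean useMultipleWriteLocations) {
        if (isMasterOperation) {
            return false; // master/metadata operations never carry a session token
        }
        if (!sessionConsistency) {
            return false; // only Session consistency uses session tokens
        }
        // Writes only carry a token when multi-write locations are enabled;
        // reads and batch requests always do under Session consistency.
        return isReadOnly || isBatch || useMultipleWriteLocations;
    }

    public static void main(String[] args) {
        System.out.println(shouldApplySessionToken(false, true, true, false, false));  // read -> true
        System.out.println(shouldApplySessionToken(false, true, false, false, false)); // single-master write -> false
        System.out.println(shouldApplySessionToken(true, true, true, false, false));   // master op -> false
    }
}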
For personal knowledge: when could this happen?
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, 
sessionToken); } return Mono.empty(); } }
if(collectionValueHolder== null || collectionValueHolder.v == null) {
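Editor's note (not part of the original row): one plausible answer to the question above is that resolveCollectionAsync returns a value holder whose inner value can be empty when the collection cannot be resolved, for example a name-based request against a collection that has been deleted or whose cache entry could not be populated. The quoted branch then degrades gracefully to the global session token instead of a partition-scoped one. A self-contained sketch of that fallback, assuming hypothetical stand-ins for the SDK's ValueHolder, session container, and header constant:

import java.util.HashMap;
import java.util.Map;

// Illustrative only: the "empty cache result -> global session token" fallback that
// the quoted line guards. ValueHolder, the token store, and the header name are
// hypothetical stand-ins, not the SDK's own types or constants.
public class GlobalTokenFallbackSketch {

    // Mirrors the idea of a value holder: the wrapper exists, but its value may be null.
    static final class ValueHolder<T> {
        final T v;
        ValueHolder(T v) { this.v = v; }
    }

    static void applySessionToken(ValueHolder<String> resolvedCollection,
                                  Map<String, String> tokenStore,
                                  Map<String, String> headers) {
        if (resolvedCollection == null || resolvedCollection.v == null) {
            // Collection could not be resolved: fall back to the global token.
            String token = tokenStore.getOrDefault("global", "");
            if (!token.isEmpty()) {
                headers.put("x-ms-session-token", token);
            }
            return;
        }
        // Otherwise a partition-scoped token would be resolved for the collection.
        headers.put("x-ms-session-token", tokenStore.getOrDefault(resolvedCollection.v, ""));
    }

    public static void main(String[] args) {
        Map<String, String> tokenStore = new HashMap<>();
        tokenStore.put("global", "0:42");
        Map<String, String> headers = new HashMap<>();
        applySessionToken(new ValueHolder<>(null), tokenStore, headers); // simulated cache miss
        System.out.println(headers); // {x-ms-session-token=0:42}
    }
}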
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); if (isMasterOperation(request.getResourceType(), request.getOperationType())) { if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader, request) == ConsistencyLevel.SESSION; if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){ request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return Mono.empty(); } if (!sessionConsistency || (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) { return Mono.empty(); } if (this.collectionCache != null && this.partitionKeyRangeCache != null) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request). flatMap(collectionValueHolder -> { if(collectionValueHolder== null || collectionValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collectionValueHolder.v.getResourceId(), null, null).flatMap(collectionRoutingMapValueHolder -> { if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } String partitionKeyRangeId = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal(); if (StringUtils.isNotEmpty(partitionKeyRangeId)) { PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId); request.requestContext.resolvedPartitionKeyRange = range; if (request.requestContext.resolvedPartitionKeyRange == null) { SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId, sessionContainer); } else { SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } } else if (partitionKeyInternal != null) { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( partitionKeyInternal, collectionValueHolder.v.getPartitionKey()); PartitionKeyRange range = collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString); request.requestContext.resolvedPartitionKeyRange = range; SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, 
sessionToken); } } return Mono.empty(); }); }); } else { String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } return Mono.empty(); } }
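Editor's note (not part of the original row): the visible difference between the before and after bodies is that, when the request carries a partition-key-range-id header but the routing map no longer contains that range (for example after a split), the updated body still sets a partition-local session token using the raw range id instead of silently skipping it. A minimal sketch of that fallback, assuming a plain map standing in for the session container and routing map:

import java.util.HashMap;
import java.util.Map;

// Illustrative only: the fallback the updated body adds. If the routing map no
// longer contains the range id carried on the request, the partition-local session
// token is still resolved from that raw id instead of being dropped.
public class PartitionLocalTokenFallbackSketch {

    static void applyPartitionLocalToken(String requestedRangeId,
                                         Map<String, String> routingMap,    // known ranges: id -> id
                                         Map<String, String> tokenByRange,  // session container stand-in
                                         Map<String, String> headers) {
        String resolvedRangeId = routingMap.get(requestedRangeId); // null when the range is gone
        String lookupId = (resolvedRangeId != null) ? resolvedRangeId : requestedRangeId;
        String token = tokenByRange.get(lookupId);
        if (token != null) {
            headers.put("x-ms-session-token", token);
        }
    }

    public static void main(String[] args) {
        Map<String, String> routingMap = new HashMap<>();          // range "1" was split away
        Map<String, String> tokenByRange = Map.of("1", "1:10#5");
        Map<String, String> headers = new HashMap<>();
        applyPartitionLocalToken("1", routingMap, tokenByRange, headers);
        System.out.println(headers); // token still attached via the raw range id
    }
}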
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> 
deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = 
this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, 
HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == 
HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
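Editor's note (not part of the original row): performRequestInternal in the context above picks a per-request response timeout based on the operation type, giving query-plan and address-refresh requests their own budgets. A minimal sketch of that selection, with hypothetical second values standing in for the Configs getters:

import java.time.Duration;

// Illustrative only: how performRequestInternal appears to pick a response timeout.
// The concrete second values are placeholders; the real ones come from Configs.
public class ResponseTimeoutSketch {
    enum OperationType { QueryPlan, Read, Create }

    static Duration responseTimeout(OperationType op, boolean isAddressRefresh) {
        if (op == OperationType.QueryPlan) {
            return Duration.ofSeconds(5);   // stand-in for the query-plan timeout
        }
        if (isAddressRefresh) {
            return Duration.ofSeconds(5);   // stand-in for the address-refresh timeout
        }
        return Duration.ofSeconds(60);      // stand-in for the default HTTP response timeout
    }

    public static void main(String[] args) {
        System.out.println(responseTimeout(OperationType.QueryPlan, false));
        System.out.println(responseTimeout(OperationType.Read, true));
        System.out.println(responseTimeout(OperationType.Create, false));
    }
}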
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; private ThroughputControlStore throughputControlStore; private boolean useMultipleWriteLocations; private RxPartitionKeyRangeCache partitionKeyRangeCache; private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; private RxClientCollectionCache collectionCache; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (apiType != null){ this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString()); } if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) { this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader; } public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) { this.partitionKeyRangeCache = partitionKeyRangeCache; } public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) { this.useMultipleWriteLocations = useMultipleWriteLocations; } boolean isUseMultipleWriteLocations() { return useMultipleWriteLocations; } RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() { return gatewayServiceConfigurationReader; } RxClientCollectionCache getCollectionCache() { return collectionCache; } public void setCollectionCache(RxClientCollectionCache collectionCache) { this.collectionCache = collectionCache; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, 
HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); if (this.throughputControlStore != null) { return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri)); } return this.performRequestInternal(request, method, uri); } catch (Exception e) { return Mono.error(e); } } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @param requestUri * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) { try { HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, requestUri, requestUri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request, httpRequest); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request, HttpRequest httpRequest) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> { if (httpRequest.reactorNettyRequestRecord() != null) { return new RxDocumentServiceResponse(this.clientContext, rsp, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } else { return new RxDocumentServiceResponse(this.clientContext, rsp); } }).onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) { BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot()); } BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: if (request.getResourceType() == ResourceType.PartitionKey) { return this.deleteByPartitionKey(request); } return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics))); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request)); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); } ).flatMap(response -> this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response)) ); } @Override public void enableThroughputControl(ThroughputControlStore throughputControlStore) { } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { 
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { this.captureSessionToken(request, responseHeaders); if (request.requestContext.resolvedPartitionKeyRange != null && StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) && StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) && !responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) { return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid) .flatMap(collectionRoutingMapValueHolder -> Mono.empty()); } return Mono.empty(); } private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) { return applySessionToken(request).then(addIntendedCollectionRid(request)); } private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) { if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) { return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> { if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER, request.requestContext.resolvedCollectionRid); } else { request.intendedCollectionRidPassedIntoSDK = true; } return Mono.empty(); }); } return Mono.empty(); } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
Do we have any plans for Search to minimize these customizations? There's a lot of customization happening here, and we should look into which ones can be moved into the swagger definition. (A sketch of what one of these customizations generates follows the customization code below.)
private static Javadoc createJavadoc(String description, JavadocBlockTag... tags) { Javadoc javadoc = new Javadoc(JavadocDescription.parseText(description)); for (JavadocBlockTag tag : tags) { javadoc.addBlockTag(tag); } return javadoc; }
}
private static Javadoc createJavadoc(String description, JavadocBlockTag... tags) { Javadoc javadoc = new Javadoc(JavadocDescription.parseText(description)); for (JavadocBlockTag tag : tags) { javadoc.addBlockTag(tag); } return javadoc; }
class modifiers to 'public final'. bulkAddClassModifier(publicCustomization, Modifier.Keyword.FINAL, "BM25SimilarityAlgorithm", "ClassicSimilarityAlgorithm", "HighWaterMarkChangeDetectionPolicy", "SqlIntegratedChangeTrackingPolicy", "SoftDeleteColumnDeletionDetectionPolicy", "MappingCharFilter", "PatternReplaceCharFilter", "DefaultCognitiveServicesAccount", "ConditionalSkill", "KeyPhraseExtractionSkill", "LanguageDetectionSkill", "ShaperSkill", "MergeSkill", "SplitSkill", "TextTranslationSkill", "DocumentExtractionSkill", "WebApiSkill"); bulkRemoveMethod(publicCustomization, "getOdataType", "BM25SimilarityAlgorithm", "ClassicSimilarityAlgorithm", "ConditionalSkill", "DefaultCognitiveServicesAccount", "DocumentExtractionSkill", "EntityLinkingSkill", "HighWaterMarkChangeDetectionPolicy", "KeyPhraseExtractionSkill", "LanguageDetectionSkill", "MappingCharFilter", "MergeSkill", "PatternReplaceCharFilter", "PiiDetectionSkill", "SearchIndexerDataNoneIdentity", "SearchIndexerDataUserAssignedIdentity", "ShaperSkill", "SoftDeleteColumnDeletionDetectionPolicy", "SplitSkill", "SqlIntegratedChangeTrackingPolicy", "TextTranslationSkill", "WebApiSkill"); bulkRemoveMethod(publicCustomization, "getType", "DistanceScoringFunction", "FreshnessScoringFunction", "MagnitudeScoringFunction", "TagScoringFunction"); addVarArgsOverload(publicCustomization.getClass("InputFieldMappingEntry"), "inputs", "InputFieldMappingEntry"); addVarArgsOverload(publicCustomization.getClass("ScoringProfile"), "functions", "ScoringFunction"); customizeMagnitudeScoringParameters(publicCustomization.getClass("MagnitudeScoringParameters")); customizeSearchFieldDataType(publicCustomization.getClass("SearchFieldDataType")); customizeSimilarityAlgorithm(publicCustomization.getClass("SimilarityAlgorithm")); customizeCognitiveServicesAccountKey(publicCustomization.getClass("CognitiveServicesAccountKey")); customizeOcrSkill(publicCustomization.getClass("OcrSkill")); customizeImageAnalysisSkill(publicCustomization.getClass("ImageAnalysisSkill")); customizeEntityRecognitionSkill(publicCustomization.getClass("EntityRecognitionSkill"), implCustomization.getClass("EntityRecognitionSkillV3")); customizeCustomEntityLookupSkill(publicCustomization.getClass("CustomEntityLookupSkill")); customizeCustomNormalizer(publicCustomization.getClass("CustomNormalizer")); customizeSearchField(publicCustomization.getClass("SearchField")); customizeSynonymMap(publicCustomization.getClass("SynonymMap")); customizeSearchResourceEncryptionKey(publicCustomization.getClass("SearchResourceEncryptionKey"), implCustomization.getClass("AzureActiveDirectoryApplicationCredentials")); customizeSearchSuggester(publicCustomization.getClass("SearchSuggester")); customizeCustomAnalyzer(publicCustomization.getClass("CustomAnalyzer")); customizePatternAnalyzer(publicCustomization.getClass("PatternAnalyzer")); customizeLuceneStandardAnalyzer(publicCustomization.getClass("LuceneStandardAnalyzer")); customizeStopAnalyzer(publicCustomization.getClass("StopAnalyzer")); customizeSearchIndexerSkillset(publicCustomization.getClass("SearchIndexerSkillset")); customizeSearchIndexerSkill(publicCustomization.getClass("SearchIndexerSkill")); customizeSentimentSkill(publicCustomization.getClass("SentimentSkill"), implCustomization.getClass("SentimentSkillV3")); addKnowledgeStoreProjectionFluentSetterOverrides( publicCustomization.getClass("SearchIndexerKnowledgeStoreBlobProjectionSelector"), publicCustomization.getClass("SearchIndexerKnowledgeStoreFileProjectionSelector"), 
publicCustomization.getClass("SearchIndexerKnowledgeStoreObjectProjectionSelector"), publicCustomization.getClass("SearchIndexerKnowledgeStoreTableProjectionSelector")); } private void customizeSearchFieldDataType(ClassCustomization classCustomization) { classCustomization.customizeAst(compilationUnit -> compilationUnit.getClassByName(classCustomization.getClassName()).get() .addMethod("collection", Modifier.Keyword.PUBLIC, Modifier.Keyword.STATIC) .setType("SearchFieldDataType") .addParameter("SearchFieldDataType", "dataType") .addMarkerAnnotation("JsonCreator") .setBody(new BlockStmt(new NodeList<>(new ReturnStmt("fromString(String.format(\"Collection(%s)\", dataType.toString()))")))) .setJavadocComment(new Javadoc(new JavadocDescription(Collections.singletonList(() -> "Returns a collection of a specific SearchFieldDataType"))) .addBlockTag(JavadocBlockTag.createParamBlockTag("dataType", "the corresponding SearchFieldDataType")) .addBlockTag("return", "a Collection of the corresponding SearchFieldDataType"))); }
class modifiers to 'public final'. bulkAddClassModifier(publicCustomization, Modifier.Keyword.FINAL, "BM25SimilarityAlgorithm", "ClassicSimilarityAlgorithm", "HighWaterMarkChangeDetectionPolicy", "SqlIntegratedChangeTrackingPolicy", "SoftDeleteColumnDeletionDetectionPolicy", "MappingCharFilter", "PatternReplaceCharFilter", "DefaultCognitiveServicesAccount", "ConditionalSkill", "KeyPhraseExtractionSkill", "LanguageDetectionSkill", "ShaperSkill", "MergeSkill", "SplitSkill", "TextTranslationSkill", "DocumentExtractionSkill", "WebApiSkill"); bulkRemoveMethod(publicCustomization, "getOdataType", "BM25SimilarityAlgorithm", "ClassicSimilarityAlgorithm", "ConditionalSkill", "DefaultCognitiveServicesAccount", "DocumentExtractionSkill", "EntityLinkingSkill", "HighWaterMarkChangeDetectionPolicy", "KeyPhraseExtractionSkill", "LanguageDetectionSkill", "MappingCharFilter", "MergeSkill", "PatternReplaceCharFilter", "PiiDetectionSkill", "SearchIndexerDataNoneIdentity", "SearchIndexerDataUserAssignedIdentity", "ShaperSkill", "SoftDeleteColumnDeletionDetectionPolicy", "SplitSkill", "SqlIntegratedChangeTrackingPolicy", "TextTranslationSkill", "WebApiSkill"); bulkRemoveMethod(publicCustomization, "getType", "DistanceScoringFunction", "FreshnessScoringFunction", "MagnitudeScoringFunction", "TagScoringFunction"); addVarArgsOverload(publicCustomization.getClass("InputFieldMappingEntry"), "inputs", "InputFieldMappingEntry"); addVarArgsOverload(publicCustomization.getClass("ScoringProfile"), "functions", "ScoringFunction"); customizeMagnitudeScoringParameters(publicCustomization.getClass("MagnitudeScoringParameters")); customizeSearchFieldDataType(publicCustomization.getClass("SearchFieldDataType")); customizeSimilarityAlgorithm(publicCustomization.getClass("SimilarityAlgorithm")); customizeCognitiveServicesAccountKey(publicCustomization.getClass("CognitiveServicesAccountKey")); customizeOcrSkill(publicCustomization.getClass("OcrSkill")); customizeImageAnalysisSkill(publicCustomization.getClass("ImageAnalysisSkill")); customizeEntityRecognitionSkill(publicCustomization.getClass("EntityRecognitionSkill"), implCustomization.getClass("EntityRecognitionSkillV3")); customizeCustomEntityLookupSkill(publicCustomization.getClass("CustomEntityLookupSkill")); customizeCustomNormalizer(publicCustomization.getClass("CustomNormalizer")); customizeSearchField(publicCustomization.getClass("SearchField")); customizeSynonymMap(publicCustomization.getClass("SynonymMap")); customizeSearchResourceEncryptionKey(publicCustomization.getClass("SearchResourceEncryptionKey"), implCustomization.getClass("AzureActiveDirectoryApplicationCredentials")); customizeSearchSuggester(publicCustomization.getClass("SearchSuggester")); customizeCustomAnalyzer(publicCustomization.getClass("CustomAnalyzer")); customizePatternAnalyzer(publicCustomization.getClass("PatternAnalyzer")); customizeLuceneStandardAnalyzer(publicCustomization.getClass("LuceneStandardAnalyzer")); customizeStopAnalyzer(publicCustomization.getClass("StopAnalyzer")); customizeSearchIndexerSkillset(publicCustomization.getClass("SearchIndexerSkillset")); customizeSearchIndexerSkill(publicCustomization.getClass("SearchIndexerSkill")); customizeSentimentSkill(publicCustomization.getClass("SentimentSkill"), implCustomization.getClass("SentimentSkillV3")); addKnowledgeStoreProjectionFluentSetterOverrides( publicCustomization.getClass("SearchIndexerKnowledgeStoreBlobProjectionSelector"), publicCustomization.getClass("SearchIndexerKnowledgeStoreFileProjectionSelector"), 
publicCustomization.getClass("SearchIndexerKnowledgeStoreObjectProjectionSelector"), publicCustomization.getClass("SearchIndexerKnowledgeStoreTableProjectionSelector")); } private void customizeSearchFieldDataType(ClassCustomization classCustomization) { classCustomization.customizeAst(compilationUnit -> compilationUnit.getClassByName(classCustomization.getClassName()).get() .addMethod("collection", Modifier.Keyword.PUBLIC, Modifier.Keyword.STATIC) .setType("SearchFieldDataType") .addParameter("SearchFieldDataType", "dataType") .addMarkerAnnotation("JsonCreator") .setBody(new BlockStmt(new NodeList<>(new ReturnStmt("fromString(String.format(\"Collection(%s)\", dataType.toString()))")))) .setJavadocComment(new Javadoc(new JavadocDescription(Collections.singletonList(() -> "Returns a collection of a specific SearchFieldDataType"))) .addBlockTag(JavadocBlockTag.createParamBlockTag("dataType", "the corresponding SearchFieldDataType")) .addBlockTag("return", "a Collection of the corresponding SearchFieldDataType"))); }
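To make concrete what one of these hand-written customizations produces, here is a minimal sketch of the effect of a varargs overload customization such as addVarArgsOverload(publicCustomization.getClass("ScoringProfile"), "functions", "ScoringFunction"). The class and member names below are illustrative stand-ins, not the actual generated Search models:

import java.util.Arrays;
import java.util.List;

// Stand-in for a generated model; the real type is produced by autorest from the swagger file.
public final class ScoringProfileExample {
    private List<ScoringFunctionExample> functions;

    // List-based setter as emitted by code generation.
    public ScoringProfileExample setFunctions(List<ScoringFunctionExample> functions) {
        this.functions = functions;
        return this;
    }

    // Convenience varargs overload injected after generation by the customization step;
    // this is the kind of post-processing the comment above asks about moving into swagger.
    public ScoringProfileExample setFunctions(ScoringFunctionExample... functions) {
        this.functions = (functions == null) ? null : Arrays.asList(functions);
        return this;
    }
}

// Placeholder element type so the sketch is self-contained.
class ScoringFunctionExample {
}

Customizations like bulkAddClassModifier or the removal of getOdataType follow the same pattern: they rewrite the generated classes after autorest runs (note the customizeAst calls above), which is why consolidating them into the swagger definition would shrink this file considerably.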
Shouldn't we let PMA throw these errors instead of the SDK? (A sketch contrasting client-side and service-side validation follows the updated method body below.)
private RedirectCallRequest getRedirectCallRequest(String incomingCallContext, CommunicationIdentifier target) { Objects.requireNonNull(incomingCallContext, "'redirectCallRequest' cannot be null."); Objects.requireNonNull(target, "'target' cannot be null."); RedirectCallRequest request = new RedirectCallRequest() .setIncomingCallContext(incomingCallContext) .setTarget(CommunicationIdentifierConverter.convert(target)); return request; }
Objects.requireNonNull(incomingCallContext, "'redirectCallRequest' cannot be null.");
private RedirectCallRequest getRedirectCallRequest(String incomingCallContext, CommunicationIdentifier target) { RedirectCallRequest request = new RedirectCallRequest() .setIncomingCallContext(incomingCallContext) .setTarget(CommunicationIdentifierConverter.convert(target)); return request; }
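For context on the trade-off this comment raises, here is a minimal sketch of the two options, using placeholder types (RedirectRequest stands in for the generated RedirectCallRequest; the parameter names match the method above). Note that the flagged null check also names the wrong parameter in its message ('redirectCallRequest' instead of 'incomingCallContext'):

import java.util.Objects;

final class RedirectValidationSketch {

    // Option A: validate in the SDK before the request is sent. If this check is kept,
    // the message should reference the actual parameter name.
    static RedirectRequest buildWithClientValidation(String incomingCallContext, String target) {
        Objects.requireNonNull(incomingCallContext, "'incomingCallContext' cannot be null.");
        Objects.requireNonNull(target, "'target' cannot be null.");
        return new RedirectRequest(incomingCallContext, target);
    }

    // Option B (what the updated method body does): build the request as-is and let the
    // service (PMA) reject invalid input, surfacing the failure through the SDK's
    // existing error-mapping path.
    static RedirectRequest buildWithServiceValidation(String incomingCallContext, String target) {
        return new RedirectRequest(incomingCallContext, target);
    }
}

// Placeholder request type so the sketch compiles on its own.
final class RedirectRequest {
    private final String incomingCallContext;
    private final String target;

    RedirectRequest(String incomingCallContext, String target) {
        this.incomingCallContext = incomingCallContext;
        this.target = target;
    }
}

Which option is preferable depends on whether an immediate NullPointerException in the client is considered a better developer experience than a service-side error response.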
class CallingServerAsyncClient { private final CallConnectionsImpl callConnectionInternal; private final ServerCallsImpl serverCallInternal; private final ClientLogger logger = new ClientLogger(CallingServerAsyncClient.class); private final ContentDownloader contentDownloader; private final HttpPipeline httpPipelineInternal; private final String resourceEndpoint; CallingServerAsyncClient(AzureCommunicationCallingServerServiceImpl callServiceClient) { callConnectionInternal = callServiceClient.getCallConnections(); serverCallInternal = callServiceClient.getServerCalls(); httpPipelineInternal = callServiceClient.getHttpPipeline(); resourceEndpoint = callServiceClient.getEndpoint(); contentDownloader = new ContentDownloader( resourceEndpoint, httpPipelineInternal); } /** * Create a call connection request from a source identity to targets identity. * * @param source The source identity. * @param targets The target identities. * @param createCallOptions The call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful CreateCallConnection request. * * <!-- src_embed com.azure.communication.callingserver.CallingServerAsyncClient.create.call.connection.async --> * <pre> * List&lt;CommunicationIdentifier&gt; targets = Arrays.asList& * List&lt;CallMediaType&gt; requestedMediaTypes = Arrays.asList& * List&lt;CallingEventSubscriptionType&gt; requestedCallEvents = Arrays.asList& * CallingEventSubscriptionType.TONE_RECEIVED, * CallingEventSubscriptionType.PARTICIPANTS_UPDATED& * CreateCallOptions createCallOptions = new CreateCallOptions& * URI.create& * requestedMediaTypes, * requestedCallEvents& * CallConnectionAsync callAsyncConnection = callingServerAsyncClient * .createCallConnection& * </pre> * <!-- end com.azure.communication.callingserver.CallingServerAsyncClient.create.call.connection.async --> */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<CallConnectionAsync> createCallConnection( CommunicationIdentifier source, List<CommunicationIdentifier> targets, CreateCallOptions createCallOptions) { try { CreateCallRequest request = CallConnectionRequestConverter.convert(source, targets, createCallOptions); return callConnectionInternal.createCallAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Create a Call Connection Request from source identity to targets identity. * * @param source The source identity. * @param targets The target identities. * @param createCallOptions The call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful CreateCallConnection request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CallConnectionAsync>> createCallConnectionWithResponse( CommunicationIdentifier source, List<CommunicationIdentifier> targets, CreateCallOptions createCallOptions) { try { CreateCallRequest request = CallConnectionRequestConverter.convert(source, targets, createCallOptions); return callConnectionInternal.createCallWithResponseAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<CallConnection> createCallConnectionInternal( CommunicationIdentifier source, List<CommunicationIdentifier> targets, CreateCallOptions createCallOptions) { try { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(targets, "'targets' cannot be null."); Objects.requireNonNull(createCallOptions, "'createCallOptions' cannot be null."); CreateCallRequest request = CallConnectionRequestConverter.convert(source, targets, createCallOptions); return callConnectionInternal.createCallAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnection(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal)))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<CallConnection>> createCallConnectionWithResponseInternal( CommunicationIdentifier source, List<CommunicationIdentifier> targets, CreateCallOptions createCallOptions, Context context) { try { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(targets, "'targets' cannot be null."); Objects.requireNonNull(createCallOptions, "'CreateCallOptions' cannot be null."); CreateCallRequest request = CallConnectionRequestConverter.convert(source, targets, createCallOptions); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return callConnectionInternal.createCallWithResponseAsync(request, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallConnection(new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal)))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Join a Call * * @param callLocator the call locator. * @param source Source identity. * @param joinCallOptions Join call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful join request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<CallConnectionAsync> joinCall( CallLocator callLocator, CommunicationIdentifier source, JoinCallOptions joinCallOptions) { try { return serverCallInternal .joinCallAsync(JoinCallRequestConverter.convert(callLocator, source, joinCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Join a call * * @param callLocator the call locator. * @param source Source identity. * @param joinCallOptions Join call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful join request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CallConnectionAsync>> joinCallWithResponse( CallLocator callLocator, CommunicationIdentifier source, JoinCallOptions joinCallOptions) { try { return serverCallInternal. joinCallWithResponseAsync(JoinCallRequestConverter.convert(callLocator, source, joinCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<CallConnection> joinInternal( CallLocator callLocator, CommunicationIdentifier source, JoinCallOptions joinCallOptions) { try { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(joinCallOptions, "'joinCallOptions' cannot be null."); return serverCallInternal .joinCallAsync(JoinCallRequestConverter.convert(callLocator, source, joinCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnection(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal)))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<CallConnection>>joinWithResponseInternal( CallLocator callLocator, CommunicationIdentifier source, JoinCallOptions joinCallOptions, Context context) { try { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(joinCallOptions, "'joinCallOptions' cannot be null."); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .joinCallWithResponseAsync(JoinCallRequestConverter.convert(callLocator, source, joinCallOptions), contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>( response, new CallConnection(new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal)))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Answer a Call * * @param incomingCallContext The incoming call context. * @param answerCallOptions Answer call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful answer request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<CallConnectionAsync> answerCall( String incomingCallContext, AnswerCallOptions answerCallOptions) { try { return serverCallInternal .answerCallAsync(AnswerCallRequestConverter.convert(incomingCallContext, answerCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Answer a Call * * @param incomingCallContext The incoming call context. * @param answerCallOptions Answer call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful answer request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CallConnectionAsync>> answerCallWithResponse( String incomingCallContext, AnswerCallOptions answerCallOptions) { try { return serverCallInternal .answerCallWithResponseAsync(AnswerCallRequestConverter.convert(incomingCallContext, answerCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<CallConnection> answerInternal( String incomingCallContext, AnswerCallOptions answerCallOptions) { try { Objects.requireNonNull(incomingCallContext, "'incomingCallContext' cannot be null."); return serverCallInternal .answerCallAsync(AnswerCallRequestConverter.convert(incomingCallContext, answerCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnection(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal)))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<CallConnection>>answerWithResponseInternal( String incomingCallContext, AnswerCallOptions answerCallOptions, Context context) { try { Objects.requireNonNull(incomingCallContext, "'incomingCallContext' cannot be null."); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .answerCallWithResponseAsync(AnswerCallRequestConverter.convert(incomingCallContext, answerCallOptions), contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>( response, new CallConnection(new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal)))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get CallConnection object * * @param callConnectionId Call connection id. * @return CallConnection object. 
*/ public CallConnectionAsync getCallConnection(String callConnectionId) { Objects.requireNonNull(callConnectionId, "'callConnectionId' cannot be null."); return new CallConnectionAsync(callConnectionId, callConnectionInternal); } CallConnection getCallConnectionInternal(String callConnectionId) { Objects.requireNonNull(callConnectionId, "'callConnectionId' cannot be null."); return new CallConnection(new CallConnectionAsync(callConnectionId, callConnectionInternal)); } /** * Add a participant to the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @param callBackUri callBackUri to get notifications. * @param alternateCallerId Phone number to use when adding a phone number participant. * @param operationContext Value to identify context of the operation. This is used to co-relate other * communications related to this operation * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful add participant request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AddParticipantResult> addParticipant( CallLocator callLocator, CommunicationIdentifier participant, URI callBackUri, String alternateCallerId, String operationContext) { try { AddParticipantWithCallLocatorRequest requestWithCallLocator = new AddParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setParticipant(CommunicationIdentifierConverter.convert(participant)) .setAlternateCallerId(PhoneNumberIdentifierConverter.convert(alternateCallerId)) .setOperationContext(operationContext) .setCallbackUri(callBackUri.toString()); return serverCallInternal.addParticipantAsync(requestWithCallLocator, Context.NONE) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(new AddParticipantResult(result.getParticipantId()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Add a participant to the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @param callBackUri callBackUri to get notifications. * @param alternateCallerId Phone number to use when adding a phone number participant. * @param operationContext Value to identify context of the operation. This is used to co-relate other * communications related to this operation * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful add participant request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AddParticipantResult>> addParticipantWithResponse( CallLocator callLocator, CommunicationIdentifier participant, URI callBackUri, String alternateCallerId, String operationContext) { return addParticipantWithResponse( callLocator, participant, callBackUri, alternateCallerId, operationContext, null); } Mono<Response<AddParticipantResult>> addParticipantWithResponse( CallLocator callLocator, CommunicationIdentifier participant, URI callBackUri, String alternateCallerId, String operationContext, Context context) { try { Objects.requireNonNull(participant, "'participant' cannot be null."); AddParticipantWithCallLocatorRequest requestWithCallLocator = new AddParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setParticipant(CommunicationIdentifierConverter.convert(participant)) .setAlternateCallerId(PhoneNumberIdentifierConverter.convert(alternateCallerId)) .setOperationContext(operationContext) .setCallbackUri(callBackUri.toString()); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .addParticipantWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new AddParticipantResult(response.getValue().getParticipantId()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Remove a participant from the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful remove participant request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> removeParticipant(CallLocator callLocator, CommunicationIdentifier participant) { try { RemoveParticipantWithCallLocatorRequest requestWithCallLocator = getRemoveParticipantWithCallLocatorRequest(callLocator, participant); return serverCallInternal.removeParticipantAsync(requestWithCallLocator) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.empty()); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Remove a participant from the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful remove participant request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> removeParticipantWithResponse(CallLocator callLocator, CommunicationIdentifier participant) { return removeParticipantWithResponse(callLocator, participant, null); } Mono<Response<Void>> removeParticipantWithResponse(CallLocator callLocator, CommunicationIdentifier participant, Context context) { try { RemoveParticipantWithCallLocatorRequest requestWithCallLocator = getRemoveParticipantWithCallLocatorRequest(callLocator, participant); return withContext(contextValue -> { contextValue = context == null ? 
contextValue : context; return serverCallInternal .removeParticipantWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } private RemoveParticipantWithCallLocatorRequest getRemoveParticipantWithCallLocatorRequest(CallLocator callLocator, CommunicationIdentifier participant) { RemoveParticipantWithCallLocatorRequest requestWithCallLocator = new RemoveParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)); return requestWithCallLocator; } /** * Get participant from the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get participant request. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Mono<CallParticipant> getParticipant(CallLocator callLocator, CommunicationIdentifier participant) { try { GetParticipantWithCallLocatorRequest requestWithCallLocator = getGetParticipantWithCallLocatorRequest(callLocator, participant); return serverCallInternal.getParticipantAsync(requestWithCallLocator) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(CallParticipantConverter.convert(result))); } catch (RuntimeException ex) { return monoError(logger, ex); } } private GetParticipantWithCallLocatorRequest getGetParticipantWithCallLocatorRequest(CallLocator callLocator, CommunicationIdentifier participant) { GetParticipantWithCallLocatorRequest requestWithCallLocator = new GetParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)); return requestWithCallLocator; } /** * Get participant from the call using identifier. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get participant request. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Mono<Response<CallParticipant>> getParticipantWithResponse(CallLocator callLocator, CommunicationIdentifier participant) { return getParticipantWithResponse(callLocator, participant, Context.NONE); } Mono<Response<CallParticipant>> getParticipantWithResponse(CallLocator callLocator, CommunicationIdentifier participant, Context context) { try { GetParticipantWithCallLocatorRequest requestWithCallLocator = getGetParticipantWithCallLocatorRequest(callLocator, participant); return withContext(contextValue -> { contextValue = context == null ? 
contextValue : context; return serverCallInternal.getParticipantWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, CallParticipantConverter.convert(response.getValue()) ) ); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get participants from a server call. * * @param callLocator the call locator. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get participants request. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Mono<List<CallParticipant>> getParticipants(CallLocator callLocator) { try { GetAllParticipantsWithCallLocatorRequest requestWithCallLocator = new GetAllParticipantsWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)); return serverCallInternal.getParticipantsAsync(requestWithCallLocator) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just( result.stream().map(CallParticipantConverter::convert).collect(Collectors.toList()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get participants from a server call. * @param callLocator the call locator. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get participants request. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Mono<Response<List<CallParticipant>>> getParticipantsWithResponse(CallLocator callLocator) { return getParticipantsWithResponse(callLocator, Context.NONE); } Mono<Response<List<CallParticipant>>> getParticipantsWithResponse(CallLocator callLocator, Context context) { try { GetAllParticipantsWithCallLocatorRequest requestWithCallLocator = new GetAllParticipantsWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal.getParticipantsWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, response.getValue() .stream() .map(CallParticipantConverter::convert) .collect(Collectors.toList() ) ) ); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Start recording of the call. * * * @param callLocator the call locator. * @param recordingStateCallbackUri Uri to send state change callbacks. * @throws InvalidParameterException is recordingStateCallbackUri is absolute uri. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful start recording request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<StartCallRecordingResult> startRecording(CallLocator callLocator, URI recordingStateCallbackUri) { try { Objects.requireNonNull(recordingStateCallbackUri, "'recordingStateCallbackUri' cannot be null."); if (!Boolean.TRUE.equals(recordingStateCallbackUri.isAbsolute())) { throw logger.logExceptionAsError(new InvalidParameterException("'recordingStateCallbackUri' has to be an absolute Uri")); } StartCallRecordingWithCallLocatorRequest requestWithCallLocator = getStartCallRecordingWithCallLocatorRequest(callLocator, recordingStateCallbackUri); return serverCallInternal.startRecordingAsync(requestWithCallLocator, null) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(new StartCallRecordingResult(result.getRecordingId()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } private StartCallRecordingWithCallLocatorRequest getStartCallRecordingWithCallLocatorRequest(CallLocator callLocator, URI recordingStateCallbackUri) { StartCallRecordingWithCallLocatorRequest requestWithCallLocator = new StartCallRecordingWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setRecordingStateCallbackUri(recordingStateCallbackUri.toString()); return requestWithCallLocator; } /** * Start recording of the call. * * @param callLocator the call locator. * @param recordingStateCallbackUri Uri to send state change callbacks. * @param startRecordingOptions StartRecordingOptions custom options. * @param context A {@link Context} representing the request context. * @throws InvalidParameterException is recordingStateCallbackUri is absolute uri. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful start recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<StartCallRecordingResult>> startRecordingWithResponse( CallLocator callLocator, URI recordingStateCallbackUri, StartRecordingOptions startRecordingOptions, Context context) { try { Objects.requireNonNull(recordingStateCallbackUri, "'recordingStateCallbackUri' cannot be null."); if (!Boolean.TRUE.equals(recordingStateCallbackUri.isAbsolute())) { throw logger.logExceptionAsError(new InvalidParameterException("'recordingStateCallbackUri' has to be an absolute Uri")); } StartCallRecordingWithCallLocatorRequest requestWithCallLocator = getStartCallRecordingWithCallLocatorRequest(callLocator, recordingStateCallbackUri); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .startRecordingWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new StartCallRecordingResult(response.getValue().getRecordingId()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Stop recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful stop recording request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> stopRecording(String recordingId) { try { return serverCallInternal.stopRecordingAsync(recordingId) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.empty()); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Stop recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful stop recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> stopRecordingWithResponse(String recordingId) { return stopRecordingWithResponse(recordingId, Context.NONE); } Mono<Response<Void>> stopRecordingWithResponse(String recordingId, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .stopRecordingWithResponseAsync(recordingId, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Pause recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful pause recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> pauseRecording(String recordingId) { try { return serverCallInternal.pauseRecordingAsync(recordingId) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.empty()); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Pause recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful pause recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> pauseRecordingWithResponse(String recordingId) { return pauseRecordingWithResponse(recordingId, Context.NONE); } Mono<Response<Void>> pauseRecordingWithResponse(String recordingId, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .pauseRecordingWithResponseAsync(recordingId, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Resume recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return response for a successful resume recording request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> resumeRecording(String recordingId) { try { return serverCallInternal.resumeRecordingAsync(recordingId) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.empty()); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Resume recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return response for a successful resume recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> resumeRecordingWithResponse(String recordingId) { return resumeRecordingWithResponse(recordingId, Context.NONE); } Mono<Response<Void>> resumeRecordingWithResponse(String recordingId, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .resumeRecordingWithResponseAsync(recordingId, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get current recording state by recording id. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get recording state request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<CallRecordingProperties> getRecordingState(String recordingId) { try { return serverCallInternal.getRecordingPropertiesAsync(recordingId) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(new CallRecordingProperties(result.getRecordingState()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get current recording state by recording id. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get recording state request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CallRecordingProperties>> getRecordingStateWithResponse(String recordingId) { return getRecordingStateWithResponse(recordingId, Context.NONE); } Mono<Response<CallRecordingProperties>> getRecordingStateWithResponse(String recordingId, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .getRecordingPropertiesWithResponseAsync(recordingId, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallRecordingProperties(response.getValue().getRecordingState()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Download the recording content, e.g. Recording's metadata, Recording video, from the ACS endpoint * passed as parameter. * @param sourceEndpoint - URL where the content is located. * @return A {@link Flux} object containing the byte stream of the content requested. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<ByteBuffer> downloadStream(String sourceEndpoint) { try { return downloadStream(sourceEndpoint, null); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Download the recording content, e.g. Recording's metadata, Recording video, from the ACS endpoint * passed as parameter. * @param sourceEndpoint - URL where the content is located. * @param httpRange - An optional {@link HttpRange} value containing the range of bytes to download. If missing, * the whole content will be downloaded. * @return A {@link Flux} object containing the byte stream of the content requested. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<ByteBuffer> downloadStream(String sourceEndpoint, HttpRange httpRange) { try { return contentDownloader.downloadStreamWithResponse(sourceEndpoint, httpRange, Context.NONE) .map(Response::getValue) .flux() .flatMap(flux -> flux); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Download the recording content, (e.g. Recording's metadata, Recording video, etc.) from the {@code endpoint}. * @param sourceEndpoint - URL where the content is located. * @param range - An optional {@link HttpRange} value containing the range of bytes to download. If missing, * the whole content will be downloaded. * @return A {@link Mono} object containing a {@link Response} with the byte stream of the content requested. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Flux<ByteBuffer>>> downloadStreamWithResponse(String sourceEndpoint, HttpRange range) { try { return contentDownloader.downloadStreamWithResponse(sourceEndpoint, range, Context.NONE); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Download the content located in {@code endpoint} into a file marked by {@code path}. * This download will be done using parallel workers. * @param sourceEndpoint - ACS URL where the content is located. * @param destinationPath - File location. * @param parallelDownloadOptions - an optional {@link ParallelDownloadOptions} object to modify how the parallel * download will work. * @param overwrite - True to overwrite the file if it exists. * @return Response for a successful downloadTo request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> downloadTo( String sourceEndpoint, Path destinationPath, ParallelDownloadOptions parallelDownloadOptions, boolean overwrite) { try { return downloadToWithResponse(sourceEndpoint, destinationPath, parallelDownloadOptions, overwrite, Context.NONE) .then(); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Download the content located in {@code endpoint} into a file marked by {@code path}. * This download will be done using parallel workers. * @param sourceEndpoint - ACS URL where the content is located. * @param destinationPath - File location. * @param parallelDownloadOptions - an optional {@link ParallelDownloadOptions} object to modify how the parallel * download will work. * @param overwrite - True to overwrite the file if it exists. * @return Response containing the http response information from the download. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> downloadToWithResponse( String sourceEndpoint, Path destinationPath, ParallelDownloadOptions parallelDownloadOptions, boolean overwrite) { try { return downloadToWithResponse(sourceEndpoint, destinationPath, parallelDownloadOptions, overwrite, Context.NONE); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> downloadToWithResponse( String sourceEndpoint, OutputStream destinationStream, HttpRange httpRange, Context context) { return contentDownloader.downloadToStreamWithResponse(sourceEndpoint, destinationStream, httpRange, context); } Mono<Response<Void>> downloadToWithResponse( String sourceEndpoint, Path destinationPath, ParallelDownloadOptions parallelDownloadOptions, boolean overwrite, Context context) { Objects.requireNonNull(sourceEndpoint, "'sourceEndpoint' cannot be null"); Objects.requireNonNull(destinationPath, "'destinationPath' cannot be null"); Set<OpenOption> openOptions = new HashSet<>(); if (overwrite) { openOptions.add(StandardOpenOption.CREATE); } else { openOptions.add(StandardOpenOption.CREATE_NEW); } openOptions.add(StandardOpenOption.WRITE); try { AsynchronousFileChannel file = AsynchronousFileChannel.open(destinationPath, openOptions, null); return downloadToWithResponse(sourceEndpoint, destinationPath, file, parallelDownloadOptions, context); } catch (IOException ex) { return monoError(logger, new RuntimeException(ex)); } } Mono<Response<Void>> downloadToWithResponse( String sourceEndpoint, Path destinationPath, AsynchronousFileChannel fileChannel, ParallelDownloadOptions parallelDownloadOptions, Context context ) { ParallelDownloadOptions finalParallelDownloadOptions = parallelDownloadOptions == null ? new ParallelDownloadOptions() : parallelDownloadOptions; return Mono.just(fileChannel).flatMap( c -> contentDownloader.downloadToFileWithResponse(sourceEndpoint, c, finalParallelDownloadOptions, context)) .doFinally(signalType -> contentDownloader.downloadToFileCleanup(fileChannel, destinationPath, signalType)); } /** * Play audio in a call. * * @param callLocator The call locator. * @param audioFileUri The media resource uri of the play audio request. Currently only Wave file (.wav) format * audio prompts are supported. More specifically, the audio content in the wave file must * be mono (single-channel), 16-bit samples with a 16,000 (16KHz) sampling rate. * @param playAudioOptions Options for play audio. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PlayAudioResult> playAudio(CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions) { return playAudioInternal(callLocator, audioFileUri, playAudioOptions, Context.NONE); } Mono<PlayAudioResult> playAudioInternal(CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions, Context context) { try { Objects.requireNonNull(callLocator, "'callLocator' cannot be null."); Objects.requireNonNull(audioFileUri, "'audioFileUri' cannot be null."); PlayAudioWithCallLocatorRequest requestWithCallLocator = getPlayAudioWithCallLocatorRequest(callLocator, audioFileUri, playAudioOptions); return serverCallInternal.playAudioAsync(requestWithCallLocator, context) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(PlayAudioResultConverter.convert(result))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Play audio in a call. * * @param callLocator The server call id. * @param audioFileUri The media resource uri of the play audio request. Currently only Wave file (.wav) format * audio prompts are supported. More specifically, the audio content in the wave file must * be mono (single-channel), 16-bit samples with a 16,000 (16KHz) sampling rate. * @param playAudioOptions Options for play audio. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PlayAudioResult>> playAudioWithResponse( CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions) { return playAudioWithResponseInternal(callLocator, audioFileUri, playAudioOptions, Context.NONE); } Mono<Response<PlayAudioResult>> playAudioWithResponseInternal( CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions, Context context) { try { Objects.requireNonNull(callLocator, "'callLocator' cannot be null."); Objects.requireNonNull(audioFileUri, "'audioFileUri' cannot be null."); PlayAudioWithCallLocatorRequest requestWithCallLocator = getPlayAudioWithCallLocatorRequest(callLocator, audioFileUri, playAudioOptions); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .playAudioWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, PlayAudioResultConverter.convert(response.getValue()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } private PlayAudioWithCallLocatorRequest getPlayAudioWithCallLocatorRequest(CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions) { PlayAudioWithCallLocatorRequest requestWithCallLocator = new PlayAudioWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setAudioFileUri(audioFileUri.toString()); if (playAudioOptions != null) { requestWithCallLocator .setLoop(playAudioOptions.isLoop()) .setOperationContext(playAudioOptions.getOperationContext()) .setAudioFileId(playAudioOptions.getAudioFileId()) .setCallbackUri(playAudioOptions.getCallbackUri().toString()); } return requestWithCallLocator; } /** * Cancel Media Operation. * * @param callLocator The server call id. 
* @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelMediaOperation( CallLocator callLocator, String mediaOperationId) { return cancelMediaOperationWithResponseInternal(callLocator, mediaOperationId, Context.NONE); } /** * Cancel Media Operation. * * @param callLocator The server call id. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelMediaOperationWithResponse( CallLocator callLocator, String mediaOperationId) { return cancelMediaOperationWithResponseInternal(callLocator, mediaOperationId, Context.NONE); } /** * Cancel Media Operation. * * @param callLocator The server call id. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelParticipantMediaOperation( CallLocator callLocator, String mediaOperationId) { return cancelMediaOperationWithResponseInternal(callLocator, mediaOperationId, Context.NONE); } Mono<Response<Void>> cancelMediaOperationWithResponseInternal( CallLocator callLocator, String mediaOperationId, Context context) { try { Objects.requireNonNull(callLocator, "'callLocator' cannot be null."); Objects.requireNonNull(mediaOperationId, "'mediaOperationId' cannot be null."); CancelMediaOperationWithCallLocatorRequest requestWithCallLocator = new CancelMediaOperationWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setMediaOperationId(mediaOperationId); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .cancelMediaOperationWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Cancel Participant Media Operation. * * @param callLocator The server call id. * @param participant The identifier of the participant. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelParticipantMediaOperationWithResponse( CallLocator callLocator, CommunicationIdentifier participant, String mediaOperationId) { return cancelParticipantMediaOperationWithResponseInternal(callLocator, participant, mediaOperationId, Context.NONE); } /** * Cancel Participant Media Operation. * * @param callLocator The server call id. 
* @param participant The identifier of the participant. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelParticipantMediaOperation( CallLocator callLocator, CommunicationIdentifier participant, String mediaOperationId) { return cancelParticipantMediaOperationWithResponseInternal(callLocator, participant, mediaOperationId, Context.NONE) .flatMap(result -> Mono.empty()); } Mono<Response<Void>> cancelParticipantMediaOperationWithResponseInternal( CallLocator callLocator, CommunicationIdentifier participant, String mediaOperationId, Context context) { try { CancelParticipantMediaOperationWithCallLocatorRequest requestWithCallLocator = new CancelParticipantMediaOperationWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)) .setMediaOperationId(mediaOperationId); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .cancelParticipantMediaOperationWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Play audio to a participant. * * @param callLocator The server call id. * @param participant The identifier of the participant. * @param audioFileUri The media resource uri of the play audio request. Currently only Wave file (.wav) format * audio prompts are supported. More specifically, the audio content in the wave file must * be mono (single-channel), 16-bit samples with a 16,000 (16KHz) sampling rate. * @param playAudioOptions Options for play audio. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio to participant operation. 
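     *
     * <p>A minimal sketch: the participant is identified here with a
     * {@code CommunicationUserIdentifier} (assumed available from azure-communication-common),
     * and the locator, options and raw user id are placeholders.</p>
     * <pre>
     * CommunicationIdentifier targetParticipant =
     *     new CommunicationUserIdentifier("8:acs:placeholder-user-id");    // placeholder raw id
     * PlayAudioResult participantPlayResult = callingServerAsyncClient
     *     .playAudioToParticipant(
     *         serverCallLocator,                                           // assumed CallLocator instance
     *         targetParticipant,
     *         URI.create("https://contoso.com/prompts/hold-music.wav"),    // placeholder prompt
     *         playAudioOptions)                                            // assumed pre-configured options
     *     .block();
     * </pre>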
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PlayAudioResult> playAudioToParticipant(CallLocator callLocator, CommunicationIdentifier participant, URI audioFileUri, PlayAudioOptions playAudioOptions) { return playAudioToParticipantInternal(callLocator, participant, audioFileUri, playAudioOptions, Context.NONE); } Mono<PlayAudioResult> playAudioToParticipantInternal(CallLocator callLocator, CommunicationIdentifier participant, URI audioFileUri, PlayAudioOptions playAudioOptions, Context context) { try { Objects.requireNonNull(callLocator, "'callLocator' cannot be null."); Objects.requireNonNull(participant, "'participant' cannot be null."); Objects.requireNonNull(audioFileUri, "'audioFileUri' cannot be null."); PlayAudioToParticipantWithCallLocatorRequest requestWithCallLocator = new PlayAudioToParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)) .setAudioFileUri(audioFileUri.toString()); if (playAudioOptions != null) { requestWithCallLocator .setLoop(playAudioOptions.isLoop()) .setOperationContext(playAudioOptions.getOperationContext()) .setAudioFileId(playAudioOptions.getAudioFileId()) .setCallbackUri(playAudioOptions.getCallbackUri().toString()); } return serverCallInternal.participantPlayAudioAsync(requestWithCallLocator, context) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(PlayAudioResultConverter.convert(result))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Play audio to a participant. * * @param callLocator The server call id. * @param participant The identifier of the participant. * @param audioFileUri The media resource uri of the play audio request. Currently only Wave file (.wav) format * audio prompts are supported. More specifically, the audio content in the wave file must * be mono (single-channel), 16-bit samples with a 16,000 (16KHz) sampling rate. * @param playAudioOptions Options for play audio. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. 
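     *
     * <p>The {@code WithResponse} variant also surfaces the HTTP details; a hedged sketch with
     * the same kind of assumed inputs as the overload above:</p>
     * <pre>
     * Response&lt;PlayAudioResult&gt; playAudioResponse = callingServerAsyncClient
     *     .playAudioToParticipantWithResponse(
     *         serverCallLocator,                                           // assumed CallLocator instance
     *         targetParticipant,                                           // assumed CommunicationIdentifier
     *         URI.create("https://contoso.com/prompts/hold-music.wav"),    // placeholder prompt
     *         playAudioOptions)                                            // assumed pre-configured options
     *     .block();
     * System.out.println("Play audio returned status " + playAudioResponse.getStatusCode());
     * PlayAudioResult payload = playAudioResponse.getValue();
     * </pre>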
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PlayAudioResult>> playAudioToParticipantWithResponse( CallLocator callLocator, CommunicationIdentifier participant, URI audioFileUri, PlayAudioOptions playAudioOptions) { return playAudioToParticipantWithResponseInternal(callLocator, participant, audioFileUri, playAudioOptions, Context.NONE); } Mono<Response<PlayAudioResult>> playAudioToParticipantWithResponseInternal( CallLocator callLocator, CommunicationIdentifier participant, URI audioFileUri, PlayAudioOptions playAudioOptions, Context context) { try { Objects.requireNonNull(callLocator, "'callLocator' cannot be null."); Objects.requireNonNull(participant, "'participant' cannot be null."); Objects.requireNonNull(audioFileUri, "'audioFileUri' cannot be null."); PlayAudioToParticipantWithCallLocatorRequest requestWithCallLocator = new PlayAudioToParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)) .setAudioFileUri(audioFileUri.toString()); if (playAudioOptions != null) { requestWithCallLocator .setLoop(playAudioOptions.isLoop()) .setOperationContext(playAudioOptions.getOperationContext()) .setAudioFileId(playAudioOptions.getAudioFileId()) .setCallbackUri(playAudioOptions.getCallbackUri().toString()); } return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .participantPlayAudioWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, PlayAudioResultConverter.convert(response.getValue()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Redirect the call. * * @param incomingCallContext the incomingCallContext value to set. * @param target the target value to set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws CommunicationErrorResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> redirectCall(String incomingCallContext, CommunicationIdentifier target) { try { RedirectCallRequest request = getRedirectCallRequest(incomingCallContext, target); return serverCallInternal.redirectCallAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Redirect the call. * * @param incomingCallContext the incomingCallContext value to set. * @param target the target value to set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws CommunicationErrorResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. 
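     *
     * <p>A hedged sketch: {@code incomingCallContext} is assumed to come from an incoming-call
     * event delivered to the application, and the redirect target id is a placeholder.</p>
     * <pre>
     * Response&lt;Void&gt; redirectResponse = callingServerAsyncClient
     *     .redirectCallWithResponse(
     *         incomingCallContext,                                         // assumed event-provided context
     *         new CommunicationUserIdentifier("8:acs:redirect-target-id")) // placeholder identifier
     *     .block();
     * System.out.println("Redirect returned status " + redirectResponse.getStatusCode());
     * </pre>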
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> redirectCallWithResponse(String incomingCallContext, CommunicationIdentifier target) { return redirectCallWithResponseInternal(incomingCallContext, target, Context.NONE); } Mono<Response<Void>> redirectCallWithResponseInternal(String incomingCallContext, CommunicationIdentifier target, Context context) { try { RedirectCallRequest request = getRedirectCallRequest(incomingCallContext, target); return serverCallInternal.redirectCallWithResponseAsync(request, context) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Reject the call. * * @param incomingCallContext the incomingCallContext value to set. * @param rejectReason the call reject reason value to set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws CommunicationErrorResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> rejectCall(String incomingCallContext, CallRejectReason rejectReason) { try { RejectCallRequest request = getRejectCallRequest(incomingCallContext, rejectReason); return serverCallInternal.rejectCallAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); } catch (RuntimeException ex) { return monoError(logger, ex); } } private RejectCallRequest getRejectCallRequest(String incomingCallContext, CallRejectReason rejectReason) { Objects.requireNonNull(incomingCallContext, "'redirectCallRequest' cannot be null."); RejectCallRequest request = new RejectCallRequest() .setIncomingCallContext(incomingCallContext) .setCallRejectReason(rejectReason); return request; } /** * Reject the call. * * @param incomingCallContext the incomingCallContext value to set. * @param rejectReason the call reject reason value to set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws CommunicationErrorResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> rejectCallWithResponse(String incomingCallContext, CallRejectReason rejectReason) { return rejectCallWithResponseInternal(incomingCallContext, rejectReason, Context.NONE); } Mono<Response<Void>> rejectCallWithResponseInternal(String incomingCallContext, CallRejectReason rejectReason, Context context) { try { RejectCallRequest request = getRejectCallRequest(incomingCallContext, rejectReason); return serverCallInternal.rejectCallWithResponseAsync(request, context) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Delete the content located at the deleteEndpoint * @param deleteEndpoint - ACS URL where the content is located. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for successful delete request. 
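     *
     * <p>A minimal sketch; the delete endpoint below is a placeholder for the recording content
     * URL handed back by the service, not a value defined in this class.</p>
     * <pre>
     * callingServerAsyncClient
     *     .deleteRecording("https://contoso.communication.azure.com/recording/content-id") // placeholder URL
     *     .block();
     * </pre>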
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRecording(String deleteEndpoint) { try { return deleteRecordingWithResponse(deleteEndpoint, null).then(); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Delete the content located at the deleteEndpoint * Recording deletion will be done using parallel workers. * @param deleteEndpoint - ACS URL where the content is located. * @param context A {@link Context} representing the request context. * @return Response for successful delete request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<HttpResponse>> deleteRecordingWithResponse(String deleteEndpoint, Context context) { HttpRequest request = new HttpRequest(HttpMethod.DELETE, deleteEndpoint); URL urlToSignWith = getUrlToSignRequestWith(deleteEndpoint); Context finalContext; if (context == null) { finalContext = new Context("hmacSignatureURL", urlToSignWith); } else { finalContext = context.addData("hmacSignatureURL", urlToSignWith); } Mono<HttpResponse> httpResponse = httpPipelineInternal.send(request, finalContext); try { return httpResponse.map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private URL getUrlToSignRequestWith(String endpoint) { try { String path = new URL(endpoint).getPath(); if (path.startsWith("/")) { path = path.substring(1); } return new URL(resourceEndpoint + path); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException(ex)); } } }
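/**
 * A hedged end-to-end sketch of the recording lifecycle exposed by this client. How the client
 * and the {@code CallLocator} are obtained is not shown in this file and is assumed, as is the
 * {@code getRecordingId()} accessor on {@code StartCallRecordingResult}.
 * <pre>
 * // callingServerAsyncClient and serverCallLocator are assumed to be built elsewhere
 * StartCallRecordingResult startResult = callingServerAsyncClient
 *     .startRecording(serverCallLocator,
 *         URI.create("https://contoso.com/recording-state-callback")) // placeholder absolute callback URI
 *     .block();
 * String recordingId = startResult.getRecordingId();                  // assumed accessor
 * callingServerAsyncClient.pauseRecording(recordingId).block();
 * callingServerAsyncClient.resumeRecording(recordingId).block();
 * CallRecordingProperties properties = callingServerAsyncClient.getRecordingState(recordingId).block();
 * callingServerAsyncClient.stopRecording(recordingId).block();
 * </pre>
 */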
class CallingServerAsyncClient { private final CallConnectionsImpl callConnectionInternal; private final ServerCallsImpl serverCallInternal; private final ClientLogger logger = new ClientLogger(CallingServerAsyncClient.class); private final ContentDownloader contentDownloader; private final HttpPipeline httpPipelineInternal; private final String resourceEndpoint; CallingServerAsyncClient(AzureCommunicationCallingServerServiceImpl callServiceClient) { callConnectionInternal = callServiceClient.getCallConnections(); serverCallInternal = callServiceClient.getServerCalls(); httpPipelineInternal = callServiceClient.getHttpPipeline(); resourceEndpoint = callServiceClient.getEndpoint(); contentDownloader = new ContentDownloader( resourceEndpoint, httpPipelineInternal); } /** * Create a call connection request from a source identity to targets identity. * * @param source The source identity. * @param targets The target identities. * @param createCallOptions The call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful CreateCallConnection request. * * <!-- src_embed com.azure.communication.callingserver.CallingServerAsyncClient.create.call.connection.async --> * <pre> * List&lt;CommunicationIdentifier&gt; targets = Arrays.asList& * List&lt;CallMediaType&gt; requestedMediaTypes = Arrays.asList& * List&lt;CallingEventSubscriptionType&gt; requestedCallEvents = Arrays.asList& * CallingEventSubscriptionType.TONE_RECEIVED, * CallingEventSubscriptionType.PARTICIPANTS_UPDATED& * CreateCallOptions createCallOptions = new CreateCallOptions& * URI.create& * requestedMediaTypes, * requestedCallEvents& * CallConnectionAsync callAsyncConnection = callingServerAsyncClient * .createCallConnection& * </pre> * <!-- end com.azure.communication.callingserver.CallingServerAsyncClient.create.call.connection.async --> */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<CallConnectionAsync> createCallConnection( CommunicationIdentifier source, List<CommunicationIdentifier> targets, CreateCallOptions createCallOptions) { try { CreateCallRequest request = CallConnectionRequestConverter.convert(source, targets, createCallOptions); return callConnectionInternal.createCallAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Create a Call Connection Request from source identity to targets identity. * * @param source The source identity. * @param targets The target identities. * @param createCallOptions The call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful CreateCallConnection request. 
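     *
     * <p>A hedged sketch of the {@code WithResponse} variant; {@code source}, {@code targets} and
     * {@code createCallOptions} are assumed to be assembled as in the sample above, which is only
     * partially reproduced here.</p>
     * <pre>
     * Response&lt;CallConnectionAsync&gt; createResponse = callingServerAsyncClient
     *     .createCallConnectionWithResponse(source, targets, createCallOptions) // assumed pre-built inputs
     *     .block();
     * CallConnectionAsync callConnectionAsync = createResponse.getValue();
     * System.out.println("Create call returned status " + createResponse.getStatusCode());
     * </pre>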
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CallConnectionAsync>> createCallConnectionWithResponse( CommunicationIdentifier source, List<CommunicationIdentifier> targets, CreateCallOptions createCallOptions) { try { CreateCallRequest request = CallConnectionRequestConverter.convert(source, targets, createCallOptions); return callConnectionInternal.createCallWithResponseAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<CallConnection> createCallConnectionInternal( CommunicationIdentifier source, List<CommunicationIdentifier> targets, CreateCallOptions createCallOptions) { try { CreateCallRequest request = CallConnectionRequestConverter.convert(source, targets, createCallOptions); return callConnectionInternal.createCallAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnection(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal)))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<CallConnection>> createCallConnectionWithResponseInternal( CommunicationIdentifier source, List<CommunicationIdentifier> targets, CreateCallOptions createCallOptions, Context context) { try { CreateCallRequest request = CallConnectionRequestConverter.convert(source, targets, createCallOptions); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return callConnectionInternal.createCallWithResponseAsync(request, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallConnection(new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal)))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Join a Call * * @param callLocator the call locator. * @param source Source identity. * @param joinCallOptions Join call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful join request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<CallConnectionAsync> joinCall( CallLocator callLocator, CommunicationIdentifier source, JoinCallOptions joinCallOptions) { try { return serverCallInternal .joinCallAsync(JoinCallRequestConverter.convert(callLocator, source, joinCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Join a call * * @param callLocator the call locator. * @param source Source identity. * @param joinCallOptions Join call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful join request. 
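     *
     * <p>A minimal sketch: the locator, the joining identity and {@code joinCallOptions} are
     * assumed to be created elsewhere; their builders are not shown in this class.</p>
     * <pre>
     * Response&lt;CallConnectionAsync&gt; joinResponse = callingServerAsyncClient
     *     .joinCallWithResponse(
     *         serverCallLocator,                                       // assumed CallLocator instance
     *         new CommunicationUserIdentifier("8:acs:joining-user-id"),// placeholder identity
     *         joinCallOptions)                                         // assumed pre-built options
     *     .block();
     * CallConnectionAsync joinedConnection = joinResponse.getValue();
     * </pre>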
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CallConnectionAsync>> joinCallWithResponse( CallLocator callLocator, CommunicationIdentifier source, JoinCallOptions joinCallOptions) { try { return serverCallInternal. joinCallWithResponseAsync(JoinCallRequestConverter.convert(callLocator, source, joinCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<CallConnection> joinInternal( CallLocator callLocator, CommunicationIdentifier source, JoinCallOptions joinCallOptions) { try { return serverCallInternal .joinCallAsync(JoinCallRequestConverter.convert(callLocator, source, joinCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnection(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal)))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<CallConnection>>joinWithResponseInternal( CallLocator callLocator, CommunicationIdentifier source, JoinCallOptions joinCallOptions, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .joinCallWithResponseAsync(JoinCallRequestConverter.convert(callLocator, source, joinCallOptions), contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>( response, new CallConnection(new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal)))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Answer a Call * * @param incomingCallContext The incoming call context. * @param answerCallOptions Answer call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful answer request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<CallConnectionAsync> answerCall( String incomingCallContext, AnswerCallOptions answerCallOptions) { try { return serverCallInternal .answerCallAsync(AnswerCallRequestConverter.convert(incomingCallContext, answerCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Answer a Call * * @param incomingCallContext The incoming call context. * @param answerCallOptions Answer call options. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful answer request. 
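     *
     * <p>A hedged sketch: {@code incomingCallContext} is assumed to be taken from an incoming-call
     * event, and {@code answerCallOptions} from an options builder not shown here.</p>
     * <pre>
     * Response&lt;CallConnectionAsync&gt; answerResponse = callingServerAsyncClient
     *     .answerCallWithResponse(incomingCallContext, answerCallOptions) // assumed inputs
     *     .block();
     * CallConnectionAsync answeredConnection = answerResponse.getValue();
     * </pre>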
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CallConnectionAsync>> answerCallWithResponse( String incomingCallContext, AnswerCallOptions answerCallOptions) { try { return serverCallInternal .answerCallWithResponseAsync(AnswerCallRequestConverter.convert(incomingCallContext, answerCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<CallConnection> answerInternal( String incomingCallContext, AnswerCallOptions answerCallOptions) { try { return serverCallInternal .answerCallAsync(AnswerCallRequestConverter.convert(incomingCallContext, answerCallOptions)) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(response -> Mono.just(new CallConnection(new CallConnectionAsync(response.getCallConnectionId(), callConnectionInternal)))); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<CallConnection>>answerWithResponseInternal( String incomingCallContext, AnswerCallOptions answerCallOptions, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .answerCallWithResponseAsync(AnswerCallRequestConverter.convert(incomingCallContext, answerCallOptions), contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>( response, new CallConnection(new CallConnectionAsync(response.getValue().getCallConnectionId(), callConnectionInternal)))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get CallConnection object * * @param callConnectionId Call connection id. * @return CallConnection object. */ public CallConnectionAsync getCallConnection(String callConnectionId) { return new CallConnectionAsync(callConnectionId, callConnectionInternal); } CallConnection getCallConnectionInternal(String callConnectionId) { return new CallConnection(new CallConnectionAsync(callConnectionId, callConnectionInternal)); } /** * Add a participant to the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @param callBackUri callBackUri to get notifications. * @param alternateCallerId Phone number to use when adding a phone number participant. * @param operationContext Value to identify context of the operation. This is used to co-relate other * communications related to this operation * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful add participant request. 
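     *
     * <p>A minimal sketch adding a phone participant; {@code PhoneNumberIdentifier} is assumed to
     * come from azure-communication-common, and the numbers, callback URI and operation context
     * below are illustrative placeholders only.</p>
     * <pre>
     * AddParticipantResult addResult = callingServerAsyncClient
     *     .addParticipant(
     *         serverCallLocator,                                   // assumed CallLocator instance
     *         new PhoneNumberIdentifier("+14255550123"),           // placeholder participant number
     *         URI.create("https://contoso.com/callback"),          // placeholder callback URI
     *         "+18005550199",                                      // placeholder alternate caller id
     *         "add-participant-operation-1")                       // placeholder operation context
     *     .block();
     * </pre>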
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AddParticipantResult> addParticipant( CallLocator callLocator, CommunicationIdentifier participant, URI callBackUri, String alternateCallerId, String operationContext) { try { AddParticipantWithCallLocatorRequest requestWithCallLocator = new AddParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setParticipant(CommunicationIdentifierConverter.convert(participant)) .setAlternateCallerId(PhoneNumberIdentifierConverter.convert(alternateCallerId)) .setOperationContext(operationContext) .setCallbackUri(callBackUri.toString()); return serverCallInternal.addParticipantAsync(requestWithCallLocator, Context.NONE) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(new AddParticipantResult(result.getParticipantId()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Add a participant to the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @param callBackUri callBackUri to get notifications. * @param alternateCallerId Phone number to use when adding a phone number participant. * @param operationContext Value to identify context of the operation. This is used to co-relate other * communications related to this operation * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful add participant request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AddParticipantResult>> addParticipantWithResponse( CallLocator callLocator, CommunicationIdentifier participant, URI callBackUri, String alternateCallerId, String operationContext) { return addParticipantWithResponse( callLocator, participant, callBackUri, alternateCallerId, operationContext, null); } Mono<Response<AddParticipantResult>> addParticipantWithResponse( CallLocator callLocator, CommunicationIdentifier participant, URI callBackUri, String alternateCallerId, String operationContext, Context context) { try { AddParticipantWithCallLocatorRequest requestWithCallLocator = new AddParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setParticipant(CommunicationIdentifierConverter.convert(participant)) .setAlternateCallerId(PhoneNumberIdentifierConverter.convert(alternateCallerId)) .setOperationContext(operationContext) .setCallbackUri(callBackUri.toString()); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .addParticipantWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new AddParticipantResult(response.getValue().getParticipantId()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Remove a participant from the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful remove participant request. 
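     *
     * <p>A minimal sketch; the locator and the participant identifier are assumed to match an
     * existing participant in the call.</p>
     * <pre>
     * callingServerAsyncClient
     *     .removeParticipant(
     *         serverCallLocator,                                            // assumed CallLocator instance
     *         new CommunicationUserIdentifier("8:acs:participant-user-id")) // placeholder identifier
     *     .block();
     * </pre>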
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> removeParticipant(CallLocator callLocator, CommunicationIdentifier participant) { try { RemoveParticipantWithCallLocatorRequest requestWithCallLocator = getRemoveParticipantWithCallLocatorRequest(callLocator, participant); return serverCallInternal.removeParticipantAsync(requestWithCallLocator) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.empty()); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Remove a participant from the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful remove participant request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> removeParticipantWithResponse(CallLocator callLocator, CommunicationIdentifier participant) { return removeParticipantWithResponse(callLocator, participant, null); } Mono<Response<Void>> removeParticipantWithResponse(CallLocator callLocator, CommunicationIdentifier participant, Context context) { try { RemoveParticipantWithCallLocatorRequest requestWithCallLocator = getRemoveParticipantWithCallLocatorRequest(callLocator, participant); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .removeParticipantWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } private RemoveParticipantWithCallLocatorRequest getRemoveParticipantWithCallLocatorRequest(CallLocator callLocator, CommunicationIdentifier participant) { RemoveParticipantWithCallLocatorRequest requestWithCallLocator = new RemoveParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)); return requestWithCallLocator; } /** * Get participant from the call. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get participant request. 
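     *
     * <p>A hedged sketch; only the call shape is taken from this class, the identifier value is a
     * placeholder and the printed representation relies on {@code toString()}.</p>
     * <pre>
     * CallParticipant callParticipant = callingServerAsyncClient
     *     .getParticipant(
     *         serverCallLocator,                                            // assumed CallLocator instance
     *         new CommunicationUserIdentifier("8:acs:participant-user-id")) // placeholder identifier
     *     .block();
     * System.out.println("Participant: " + callParticipant);
     * </pre>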
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public Mono<CallParticipant> getParticipant(CallLocator callLocator, CommunicationIdentifier participant) { try { GetParticipantWithCallLocatorRequest requestWithCallLocator = getGetParticipantWithCallLocatorRequest(callLocator, participant); return serverCallInternal.getParticipantAsync(requestWithCallLocator) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(CallParticipantConverter.convert(result))); } catch (RuntimeException ex) { return monoError(logger, ex); } } private GetParticipantWithCallLocatorRequest getGetParticipantWithCallLocatorRequest(CallLocator callLocator, CommunicationIdentifier participant) { GetParticipantWithCallLocatorRequest requestWithCallLocator = new GetParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)); return requestWithCallLocator; } /** * Get participant from the call using identifier. * * @param callLocator the call locator. * @param participant The identifier of the participant. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get participant request. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Mono<Response<CallParticipant>> getParticipantWithResponse(CallLocator callLocator, CommunicationIdentifier participant) { return getParticipantWithResponse(callLocator, participant, Context.NONE); } Mono<Response<CallParticipant>> getParticipantWithResponse(CallLocator callLocator, CommunicationIdentifier participant, Context context) { try { GetParticipantWithCallLocatorRequest requestWithCallLocator = getGetParticipantWithCallLocatorRequest(callLocator, participant); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal.getParticipantWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, CallParticipantConverter.convert(response.getValue()) ) ); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get participants from a server call. * * @param callLocator the call locator. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get participants request. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Mono<List<CallParticipant>> getParticipants(CallLocator callLocator) { try { GetAllParticipantsWithCallLocatorRequest requestWithCallLocator = new GetAllParticipantsWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)); return serverCallInternal.getParticipantsAsync(requestWithCallLocator) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just( result.stream().map(CallParticipantConverter::convert).collect(Collectors.toList()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get participants from a server call. * @param callLocator the call locator. 
* @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get participants request. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Mono<Response<List<CallParticipant>>> getParticipantsWithResponse(CallLocator callLocator) { return getParticipantsWithResponse(callLocator, Context.NONE); } Mono<Response<List<CallParticipant>>> getParticipantsWithResponse(CallLocator callLocator, Context context) { try { GetAllParticipantsWithCallLocatorRequest requestWithCallLocator = new GetAllParticipantsWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal.getParticipantsWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, response.getValue() .stream() .map(CallParticipantConverter::convert) .collect(Collectors.toList() ) ) ); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Start recording of the call. * * * @param callLocator the call locator. * @param recordingStateCallbackUri Uri to send state change callbacks. * @throws InvalidParameterException is recordingStateCallbackUri is absolute uri. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful start recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<StartCallRecordingResult> startRecording(CallLocator callLocator, URI recordingStateCallbackUri) { try { if (!Boolean.TRUE.equals(recordingStateCallbackUri.isAbsolute())) { throw logger.logExceptionAsError(new InvalidParameterException("'recordingStateCallbackUri' has to be an absolute Uri")); } StartCallRecordingWithCallLocatorRequest requestWithCallLocator = getStartCallRecordingWithCallLocatorRequest(callLocator, recordingStateCallbackUri); return serverCallInternal.startRecordingAsync(requestWithCallLocator, null) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(new StartCallRecordingResult(result.getRecordingId()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } private StartCallRecordingWithCallLocatorRequest getStartCallRecordingWithCallLocatorRequest(CallLocator callLocator, URI recordingStateCallbackUri) { StartCallRecordingWithCallLocatorRequest requestWithCallLocator = new StartCallRecordingWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setRecordingStateCallbackUri(recordingStateCallbackUri.toString()); return requestWithCallLocator; } /** * Start recording of the call. * * @param callLocator the call locator. * @param recordingStateCallbackUri Uri to send state change callbacks. * @param startRecordingOptions StartRecordingOptions custom options. * @param context A {@link Context} representing the request context. * @throws InvalidParameterException is recordingStateCallbackUri is absolute uri. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
* @return Response for a successful start recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<StartCallRecordingResult>> startRecordingWithResponse( CallLocator callLocator, URI recordingStateCallbackUri, StartRecordingOptions startRecordingOptions, Context context) { try { if (!Boolean.TRUE.equals(recordingStateCallbackUri.isAbsolute())) { throw logger.logExceptionAsError(new InvalidParameterException("'recordingStateCallbackUri' has to be an absolute Uri")); } StartCallRecordingWithCallLocatorRequest requestWithCallLocator = getStartCallRecordingWithCallLocatorRequest(callLocator, recordingStateCallbackUri); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .startRecordingWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new StartCallRecordingResult(response.getValue().getRecordingId()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Stop recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful stop recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> stopRecording(String recordingId) { try { return serverCallInternal.stopRecordingAsync(recordingId) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.empty()); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Stop recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful stop recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> stopRecordingWithResponse(String recordingId) { return stopRecordingWithResponse(recordingId, Context.NONE); } Mono<Response<Void>> stopRecordingWithResponse(String recordingId, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .stopRecordingWithResponseAsync(recordingId, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Pause recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful pause recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> pauseRecording(String recordingId) { try { return serverCallInternal.pauseRecordingAsync(recordingId) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.empty()); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Pause recording of the call. * * @param recordingId Recording id to stop. 
* @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful pause recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> pauseRecordingWithResponse(String recordingId) { return pauseRecordingWithResponse(recordingId, Context.NONE); } Mono<Response<Void>> pauseRecordingWithResponse(String recordingId, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .pauseRecordingWithResponseAsync(recordingId, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Resume recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return response for a successful resume recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> resumeRecording(String recordingId) { try { return serverCallInternal.resumeRecordingAsync(recordingId) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.empty()); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Resume recording of the call. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return response for a successful resume recording request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> resumeRecordingWithResponse(String recordingId) { return resumeRecordingWithResponse(recordingId, Context.NONE); } Mono<Response<Void>> resumeRecordingWithResponse(String recordingId, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .resumeRecordingWithResponseAsync(recordingId, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get current recording state by recording id. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get recording state request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<CallRecordingProperties> getRecordingState(String recordingId) { try { return serverCallInternal.getRecordingPropertiesAsync(recordingId) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(new CallRecordingProperties(result.getRecordingState()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Get current recording state by recording id. * * @param recordingId Recording id to stop. * @throws CallingServerErrorException thrown if the request is rejected by server. 
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for a successful get recording state request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CallRecordingProperties>> getRecordingStateWithResponse(String recordingId) { return getRecordingStateWithResponse(recordingId, Context.NONE); } Mono<Response<CallRecordingProperties>> getRecordingStateWithResponse(String recordingId, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .getRecordingPropertiesWithResponseAsync(recordingId, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, new CallRecordingProperties(response.getValue().getRecordingState()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Download the recording content, e.g. Recording's metadata, Recording video, from the ACS endpoint * passed as parameter. * @param sourceEndpoint - URL where the content is located. * @return A {@link Flux} object containing the byte stream of the content requested. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<ByteBuffer> downloadStream(String sourceEndpoint) { try { return downloadStream(sourceEndpoint, null); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Download the recording content, e.g. Recording's metadata, Recording video, from the ACS endpoint * passed as parameter. * @param sourceEndpoint - URL where the content is located. * @param httpRange - An optional {@link HttpRange} value containing the range of bytes to download. If missing, * the whole content will be downloaded. * @return A {@link Flux} object containing the byte stream of the content requested. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<ByteBuffer> downloadStream(String sourceEndpoint, HttpRange httpRange) { try { return contentDownloader.downloadStreamWithResponse(sourceEndpoint, httpRange, Context.NONE) .map(Response::getValue) .flux() .flatMap(flux -> flux); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Download the recording content, (e.g. Recording's metadata, Recording video, etc.) from the {@code endpoint}. * @param sourceEndpoint - URL where the content is located. * @param range - An optional {@link HttpRange} value containing the range of bytes to download. If missing, * the whole content will be downloaded. * @return A {@link Mono} object containing a {@link Response} with the byte stream of the content requested. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Flux<ByteBuffer>>> downloadStreamWithResponse(String sourceEndpoint, HttpRange range) { try { return contentDownloader.downloadStreamWithResponse(sourceEndpoint, range, Context.NONE); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Download the content located in {@code endpoint} into a file marked by {@code path}. * This download will be done using parallel workers. * @param sourceEndpoint - ACS URL where the content is located. * @param destinationPath - File location. * @param parallelDownloadOptions - an optional {@link ParallelDownloadOptions} object to modify how the parallel * download will work. * @param overwrite - True to overwrite the file if it exists. * @return Response for a successful downloadTo request. 
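     *
     * <p>A minimal sketch of the fire-and-forget overload; note from the implementation below that
     * {@code overwrite = false} opens the file with {@code CREATE_NEW}, so the call fails if the
     * target already exists. The endpoint and path are placeholders.</p>
     * <pre>
     * callingServerAsyncClient
     *     .downloadTo(
     *         "https://contoso.communication.azure.com/recording/metadata-id", // placeholder URL
     *         Paths.get("recording-metadata.json"),                            // placeholder path
     *         new ParallelDownloadOptions(),  // default parallel download settings
     *         false)                          // do not overwrite an existing file
     *     .block();
     * </pre>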
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> downloadTo( String sourceEndpoint, Path destinationPath, ParallelDownloadOptions parallelDownloadOptions, boolean overwrite) { try { return downloadToWithResponse(sourceEndpoint, destinationPath, parallelDownloadOptions, overwrite, Context.NONE) .then(); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Download the content located in {@code endpoint} into a file marked by {@code path}. * This download will be done using parallel workers. * @param sourceEndpoint - ACS URL where the content is located. * @param destinationPath - File location. * @param parallelDownloadOptions - an optional {@link ParallelDownloadOptions} object to modify how the parallel * download will work. * @param overwrite - True to overwrite the file if it exists. * @return Response containing the http response information from the download. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> downloadToWithResponse( String sourceEndpoint, Path destinationPath, ParallelDownloadOptions parallelDownloadOptions, boolean overwrite) { try { return downloadToWithResponse(sourceEndpoint, destinationPath, parallelDownloadOptions, overwrite, Context.NONE); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> downloadToWithResponse( String sourceEndpoint, OutputStream destinationStream, HttpRange httpRange, Context context) { return contentDownloader.downloadToStreamWithResponse(sourceEndpoint, destinationStream, httpRange, context); } Mono<Response<Void>> downloadToWithResponse( String sourceEndpoint, Path destinationPath, ParallelDownloadOptions parallelDownloadOptions, boolean overwrite, Context context) { Set<OpenOption> openOptions = new HashSet<>(); if (overwrite) { openOptions.add(StandardOpenOption.CREATE); } else { openOptions.add(StandardOpenOption.CREATE_NEW); } openOptions.add(StandardOpenOption.WRITE); try { AsynchronousFileChannel file = AsynchronousFileChannel.open(destinationPath, openOptions, null); return downloadToWithResponse(sourceEndpoint, destinationPath, file, parallelDownloadOptions, context); } catch (IOException ex) { return monoError(logger, new RuntimeException(ex)); } } Mono<Response<Void>> downloadToWithResponse( String sourceEndpoint, Path destinationPath, AsynchronousFileChannel fileChannel, ParallelDownloadOptions parallelDownloadOptions, Context context ) { ParallelDownloadOptions finalParallelDownloadOptions = parallelDownloadOptions == null ? new ParallelDownloadOptions() : parallelDownloadOptions; return Mono.just(fileChannel).flatMap( c -> contentDownloader.downloadToFileWithResponse(sourceEndpoint, c, finalParallelDownloadOptions, context)) .doFinally(signalType -> contentDownloader.downloadToFileCleanup(fileChannel, destinationPath, signalType)); } /** * Play audio in a call. * * @param callLocator The call locator. * @param audioFileUri The media resource uri of the play audio request. Currently only Wave file (.wav) format * audio prompts are supported. More specifically, the audio content in the wave file must * be mono (single-channel), 16-bit samples with a 16,000 (16KHz) sampling rate. * @param playAudioOptions Options for play audio. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. 
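     *
     * <p>A hedged sketch of the {@code WithResponse} variant declared alongside this method. When
     * {@code playAudioOptions} is supplied, the request construction below dereferences its
     * callback URI, so the options are assumed to have a callback URI configured; the locator and
     * options are built elsewhere.</p>
     * <pre>
     * Response&lt;PlayAudioResult&gt; playAudioResponse = callingServerAsyncClient
     *     .playAudioWithResponse(
     *         serverCallLocator,                                       // assumed CallLocator instance
     *         URI.create("https://contoso.com/prompts/welcome.wav"),   // placeholder prompt
     *         playAudioOptions)                                        // assumed options with callback URI set
     *     .block();
     * System.out.println("Play audio returned status " + playAudioResponse.getStatusCode());
     * </pre>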
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PlayAudioResult> playAudio(CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions) { return playAudioInternal(callLocator, audioFileUri, playAudioOptions, Context.NONE); } Mono<PlayAudioResult> playAudioInternal(CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions, Context context) { try { PlayAudioWithCallLocatorRequest requestWithCallLocator = getPlayAudioWithCallLocatorRequest(callLocator, audioFileUri, playAudioOptions); return serverCallInternal.playAudioAsync(requestWithCallLocator, context) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(PlayAudioResultConverter.convert(result))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Play audio in a call. * * @param callLocator The server call id. * @param audioFileUri The media resource uri of the play audio request. Currently only Wave file (.wav) format * audio prompts are supported. More specifically, the audio content in the wave file must * be mono (single-channel), 16-bit samples with a 16,000 (16KHz) sampling rate. * @param playAudioOptions Options for play audio. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PlayAudioResult>> playAudioWithResponse( CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions) { return playAudioWithResponseInternal(callLocator, audioFileUri, playAudioOptions, Context.NONE); } Mono<Response<PlayAudioResult>> playAudioWithResponseInternal( CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions, Context context) { try { PlayAudioWithCallLocatorRequest requestWithCallLocator = getPlayAudioWithCallLocatorRequest(callLocator, audioFileUri, playAudioOptions); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .playAudioWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, PlayAudioResultConverter.convert(response.getValue()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } private PlayAudioWithCallLocatorRequest getPlayAudioWithCallLocatorRequest(CallLocator callLocator, URI audioFileUri, PlayAudioOptions playAudioOptions) { PlayAudioWithCallLocatorRequest requestWithCallLocator = new PlayAudioWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setAudioFileUri(audioFileUri.toString()); if (playAudioOptions != null) { requestWithCallLocator .setLoop(playAudioOptions.isLoop()) .setOperationContext(playAudioOptions.getOperationContext()) .setAudioFileId(playAudioOptions.getAudioFileId()) .setCallbackUri(playAudioOptions.getCallbackUri().toString()); } return requestWithCallLocator; } /** * Cancel Media Operation. * * @param callLocator The server call id. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
* @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelMediaOperation( CallLocator callLocator, String mediaOperationId) { return cancelMediaOperationWithResponseInternal(callLocator, mediaOperationId, Context.NONE); } /** * Cancel Media Operation. * * @param callLocator The server call id. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelMediaOperationWithResponse( CallLocator callLocator, String mediaOperationId) { return cancelMediaOperationWithResponseInternal(callLocator, mediaOperationId, Context.NONE); } /** * Cancel Media Operation. * * @param callLocator The server call id. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelParticipantMediaOperation( CallLocator callLocator, String mediaOperationId) { return cancelMediaOperationWithResponseInternal(callLocator, mediaOperationId, Context.NONE); } Mono<Response<Void>> cancelMediaOperationWithResponseInternal( CallLocator callLocator, String mediaOperationId, Context context) { try { CancelMediaOperationWithCallLocatorRequest requestWithCallLocator = new CancelMediaOperationWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setMediaOperationId(mediaOperationId); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .cancelMediaOperationWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Cancel Participant Media Operation. * * @param callLocator The server call id. * @param participant The identifier of the participant. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelParticipantMediaOperationWithResponse( CallLocator callLocator, CommunicationIdentifier participant, String mediaOperationId) { return cancelParticipantMediaOperationWithResponseInternal(callLocator, participant, mediaOperationId, Context.NONE); } /** * Cancel Participant Media Operation. * * @param callLocator The server call id. * @param participant The identifier of the participant. * @param mediaOperationId The Id of the media operation to Cancel. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelParticipantMediaOperation( CallLocator callLocator, CommunicationIdentifier participant, String mediaOperationId) { return cancelParticipantMediaOperationWithResponseInternal(callLocator, participant, mediaOperationId, Context.NONE) .flatMap(result -> Mono.empty()); } Mono<Response<Void>> cancelParticipantMediaOperationWithResponseInternal( CallLocator callLocator, CommunicationIdentifier participant, String mediaOperationId, Context context) { try { CancelParticipantMediaOperationWithCallLocatorRequest requestWithCallLocator = new CancelParticipantMediaOperationWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)) .setMediaOperationId(mediaOperationId); return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .cancelParticipantMediaOperationWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Play audio to a participant. * * @param callLocator The server call id. * @param participant The identifier of the participant. * @param audioFileUri The media resource uri of the play audio request. Currently only Wave file (.wav) format * audio prompts are supported. More specifically, the audio content in the wave file must * be mono (single-channel), 16-bit samples with a 16,000 (16KHz) sampling rate. * @param playAudioOptions Options for play audio. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio to participant operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PlayAudioResult> playAudioToParticipant(CallLocator callLocator, CommunicationIdentifier participant, URI audioFileUri, PlayAudioOptions playAudioOptions) { return playAudioToParticipantInternal(callLocator, participant, audioFileUri, playAudioOptions, Context.NONE); } Mono<PlayAudioResult> playAudioToParticipantInternal(CallLocator callLocator, CommunicationIdentifier participant, URI audioFileUri, PlayAudioOptions playAudioOptions, Context context) { try { PlayAudioToParticipantWithCallLocatorRequest requestWithCallLocator = new PlayAudioToParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)) .setAudioFileUri(audioFileUri.toString()); if (playAudioOptions != null) { requestWithCallLocator .setLoop(playAudioOptions.isLoop()) .setOperationContext(playAudioOptions.getOperationContext()) .setAudioFileId(playAudioOptions.getAudioFileId()) .setCallbackUri(playAudioOptions.getCallbackUri().toString()); } return serverCallInternal.participantPlayAudioAsync(requestWithCallLocator, context) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .flatMap(result -> Mono.just(PlayAudioResultConverter.convert(result))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Play audio to a participant. * * @param callLocator The server call id. * @param participant The identifier of the participant. * @param audioFileUri The media resource uri of the play audio request. 
Currently only Wave file (.wav) format * audio prompts are supported. More specifically, the audio content in the wave file must * be mono (single-channel), 16-bit samples with a 16,000 (16KHz) sampling rate. * @param playAudioOptions Options for play audio. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response payload for play audio operation. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<PlayAudioResult>> playAudioToParticipantWithResponse( CallLocator callLocator, CommunicationIdentifier participant, URI audioFileUri, PlayAudioOptions playAudioOptions) { return playAudioToParticipantWithResponseInternal(callLocator, participant, audioFileUri, playAudioOptions, Context.NONE); } Mono<Response<PlayAudioResult>> playAudioToParticipantWithResponseInternal( CallLocator callLocator, CommunicationIdentifier participant, URI audioFileUri, PlayAudioOptions playAudioOptions, Context context) { try { PlayAudioToParticipantWithCallLocatorRequest requestWithCallLocator = new PlayAudioToParticipantWithCallLocatorRequest() .setCallLocator(CallLocatorConverter.convert(callLocator)) .setIdentifier(CommunicationIdentifierConverter.convert(participant)) .setAudioFileUri(audioFileUri.toString()); if (playAudioOptions != null) { requestWithCallLocator .setLoop(playAudioOptions.isLoop()) .setOperationContext(playAudioOptions.getOperationContext()) .setAudioFileId(playAudioOptions.getAudioFileId()) .setCallbackUri(playAudioOptions.getCallbackUri().toString()); } return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return serverCallInternal .participantPlayAudioWithResponseAsync(requestWithCallLocator, contextValue) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException) .map(response -> new SimpleResponse<>(response, PlayAudioResultConverter.convert(response.getValue()))); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Redirect the call. * * @param incomingCallContext the incomingCallContext value to set. * @param target the target value to set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws CommunicationErrorResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> redirectCall(String incomingCallContext, CommunicationIdentifier target) { try { RedirectCallRequest request = getRedirectCallRequest(incomingCallContext, target); return serverCallInternal.redirectCallAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Redirect the call. * * @param incomingCallContext the incomingCallContext value to set. * @param target the target value to set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws CommunicationErrorResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> redirectCallWithResponse(String incomingCallContext, CommunicationIdentifier target) { return redirectCallWithResponseInternal(incomingCallContext, target, Context.NONE); } Mono<Response<Void>> redirectCallWithResponseInternal(String incomingCallContext, CommunicationIdentifier target, Context context) { try { RedirectCallRequest request = getRedirectCallRequest(incomingCallContext, target); return serverCallInternal.redirectCallWithResponseAsync(request, context) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Reject the call. * * @param incomingCallContext the incomingCallContext value to set. * @param rejectReason the call reject reason value to set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws CommunicationErrorResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> rejectCall(String incomingCallContext, CallRejectReason rejectReason) { try { RejectCallRequest request = getRejectCallRequest(incomingCallContext, rejectReason); return serverCallInternal.rejectCallAsync(request) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); } catch (RuntimeException ex) { return monoError(logger, ex); } } private RejectCallRequest getRejectCallRequest(String incomingCallContext, CallRejectReason rejectReason) { RejectCallRequest request = new RejectCallRequest() .setIncomingCallContext(incomingCallContext) .setCallRejectReason(rejectReason); return request; } /** * Reject the call. * * @param incomingCallContext the incomingCallContext value to set. * @param rejectReason the call reject reason value to set. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws CommunicationErrorResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> rejectCallWithResponse(String incomingCallContext, CallRejectReason rejectReason) { return rejectCallWithResponseInternal(incomingCallContext, rejectReason, Context.NONE); } Mono<Response<Void>> rejectCallWithResponseInternal(String incomingCallContext, CallRejectReason rejectReason, Context context) { try { RejectCallRequest request = getRejectCallRequest(incomingCallContext, rejectReason); return serverCallInternal.rejectCallWithResponseAsync(request, context) .onErrorMap(CommunicationErrorResponseException.class, CallingServerErrorConverter::translateException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Delete the content located at the deleteEndpoint * @param deleteEndpoint - ACS URL where the content is located. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Response for successful delete request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRecording(String deleteEndpoint) { try { return deleteRecordingWithResponse(deleteEndpoint, null).then(); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Delete the content located at the deleteEndpoint * Recording deletion will be done using parallel workers. * @param deleteEndpoint - ACS URL where the content is located. * @param context A {@link Context} representing the request context. * @return Response for successful delete request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<HttpResponse>> deleteRecordingWithResponse(String deleteEndpoint, Context context) { HttpRequest request = new HttpRequest(HttpMethod.DELETE, deleteEndpoint); URL urlToSignWith = getUrlToSignRequestWith(deleteEndpoint); Context finalContext; if (context == null) { finalContext = new Context("hmacSignatureURL", urlToSignWith); } else { finalContext = context.addData("hmacSignatureURL", urlToSignWith); } Mono<HttpResponse> httpResponse = httpPipelineInternal.send(request, finalContext); try { return httpResponse.map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private URL getUrlToSignRequestWith(String endpoint) { try { String path = new URL(endpoint).getPath(); if (path.startsWith("/")) { path = path.substring(1); } return new URL(resourceEndpoint + path); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException(ex)); } } }
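The delete path just above signs its request against a URL re-based onto the client's `resourceEndpoint` (the `hmacSignatureURL` context value). The following is a standalone restatement of that rewrite so the computed value is easier to see; it only mirrors the logic already shown in `getUrlToSignRequestWith`, and the endpoint values in `main` are placeholders, not values from this document.

import java.net.MalformedURLException;
import java.net.URL;

final class HmacSignatureUrlSketch {
    // Mirrors getUrlToSignRequestWith above: take the path of the content endpoint
    // and append it (without the leading slash) to the ACS resource endpoint.
    static URL urlToSign(String resourceEndpoint, String contentEndpoint) throws MalformedURLException {
        String path = new URL(contentEndpoint).getPath();
        if (path.startsWith("/")) {
            path = path.substring(1);
        }
        return new URL(resourceEndpoint + path);
    }

    public static void main(String[] args) throws MalformedURLException {
        // Placeholder endpoints for illustration only.
        URL signed = urlToSign("https://contoso.communication.azure.com/",
            "https://storage.example.net/recordings/abc/metadata");
        System.out.println(signed); // https://contoso.communication.azure.com/recordings/abc/metadata
    }
}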
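Earlier in this block, `downloadStream(String, HttpRange)` is documented as returning a `Flux<ByteBuffer>` carrying the requested byte range. The helper below is a minimal consumption sketch for that stream, assuming the caller already holds the returned `Flux`; the commented call site uses an illustrative `client` variable and endpoint that are not defined in this document.

import java.nio.ByteBuffer;
import reactor.core.publisher.Flux;

final class RangedDownloadSketch {
    // Sums the readable bytes across all emitted buffers and blocks for the total.
    static long countDownloadedBytes(Flux<ByteBuffer> content) {
        return content
            .map(buffer -> (long) buffer.remaining())
            .reduce(0L, Long::sum)
            .block();
    }

    // Illustrative call site (client and contentEndpoint are placeholders):
    // Flux<ByteBuffer> firstKiB = client.downloadStream(contentEndpoint, new HttpRange(0, 1024L));
    // long bytes = countDownloadedBytes(firstKiB);
}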
Does `securityProfile` have anything to do with hibernation?
private void copyInnerToUpdateParameter(VirtualMachineUpdateInner updateParameter) { updateParameter.withHardwareProfile(this.innerModel().hardwareProfile()); updateParameter.withStorageProfile(this.innerModel().storageProfile()); updateParameter.withOsProfile(this.innerModel().osProfile()); updateParameter.withNetworkProfile(this.innerModel().networkProfile()); updateParameter.withDiagnosticsProfile(this.innerModel().diagnosticsProfile()); updateParameter.withBillingProfile(this.innerModel().billingProfile()); updateParameter.withSecurityProfile(this.innerModel().securityProfile()); updateParameter.withAdditionalCapabilities(this.innerModel().additionalCapabilities()); updateParameter.withAvailabilitySet(this.innerModel().availabilitySet()); updateParameter.withLicenseType(this.innerModel().licenseType()); updateParameter.withZones(this.innerModel().zones()); updateParameter.withTags(this.innerModel().tags()); updateParameter.withProximityPlacementGroup(this.innerModel().proximityPlacementGroup()); updateParameter.withPriority(this.innerModel().priority()); updateParameter.withEvictionPolicy(this.innerModel().evictionPolicy()); }
updateParameter.withSecurityProfile(this.innerModel().securityProfile());
private void copyInnerToUpdateParameter(VirtualMachineUpdateInner updateParameter) { updateParameter.withHardwareProfile(this.innerModel().hardwareProfile()); updateParameter.withStorageProfile(this.innerModel().storageProfile()); updateParameter.withOsProfile(this.innerModel().osProfile()); updateParameter.withNetworkProfile(this.innerModel().networkProfile()); updateParameter.withDiagnosticsProfile(this.innerModel().diagnosticsProfile()); updateParameter.withBillingProfile(this.innerModel().billingProfile()); updateParameter.withSecurityProfile(this.innerModel().securityProfile()); updateParameter.withAdditionalCapabilities(this.innerModel().additionalCapabilities()); updateParameter.withAvailabilitySet(this.innerModel().availabilitySet()); updateParameter.withLicenseType(this.innerModel().licenseType()); updateParameter.withZones(this.innerModel().zones()); updateParameter.withTags(this.innerModel().tags()); updateParameter.withProximityPlacementGroup(this.innerModel().proximityPlacementGroup()); updateParameter.withPriority(this.innerModel().priority()); updateParameter.withEvictionPolicy(this.innerModel().evictionPolicy()); }
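On the question above: in the accompanying context code, hibernation is toggled through `AdditionalCapabilities` (see `enableHibernation()` further down, which calls `withHibernationEnabled(true)`), not through `securityProfile`; copying `securityProfile` into the update parameter just keeps the VM's existing security settings from being dropped by the PATCH. The sketch below separates the two concerns. It is illustrative only: `existingInner` is a hypothetical stand-in for `this.innerModel()`, and the constructor and import paths are my assumption of where these models live in `azure-resourcemanager-compute`.

import com.azure.resourcemanager.compute.fluent.models.VirtualMachineInner;
import com.azure.resourcemanager.compute.fluent.models.VirtualMachineUpdateInner;
import com.azure.resourcemanager.compute.models.AdditionalCapabilities;

final class HibernationVsSecurityProfileSketch {
    // existingInner plays the role of this.innerModel() in copyInnerToUpdateParameter above.
    static VirtualMachineUpdateInner buildUpdate(VirtualMachineInner existingInner) {
        VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();

        // Hibernation is carried by AdditionalCapabilities, mirroring enableHibernation() in the context.
        AdditionalCapabilities capabilities = new AdditionalCapabilities();
        capabilities.withHibernationEnabled(true);
        updateParameter.withAdditionalCapabilities(capabilities);

        // securityProfile is copied verbatim so the update keeps existing security settings;
        // it does not influence hibernation.
        updateParameter.withSecurityProfile(existingInner.securityProfile());
        return updateParameter;
    }
}

If the intent of the review question was whether dropping the `withSecurityProfile` line would break hibernation, the context suggests it would not; it would, however, discard any security profile already set on the VM when the update is applied.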
class VirtualMachineImpl extends GroupableResourceImpl<VirtualMachine, VirtualMachineInner, VirtualMachineImpl, ComputeManager> implements VirtualMachine, VirtualMachine.DefinitionManagedOrUnmanaged, VirtualMachine.DefinitionManaged, VirtualMachine.DefinitionUnmanaged, VirtualMachine.Update, VirtualMachine.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate, VirtualMachine.UpdateStages.WithSystemAssignedIdentityBasedAccessOrUpdate { private final ClientLogger logger = new ClientLogger(VirtualMachineImpl.class); private final StorageManager storageManager; private final NetworkManager networkManager; private final AuthorizationManager authorizationManager; private final String vmName; private final IdentifierProvider namer; private String creatableStorageAccountKey; private String creatableAvailabilitySetKey; private String creatablePrimaryNetworkInterfaceKey; private List<String> creatableSecondaryNetworkInterfaceKeys; private StorageAccount existingStorageAccountToAssociate; private AvailabilitySet existingAvailabilitySetToAssociate; private NetworkInterface existingPrimaryNetworkInterfaceToAssociate; private List<NetworkInterface> existingSecondaryNetworkInterfacesToAssociate; private VirtualMachineInstanceView virtualMachineInstanceView; private boolean isMarketplaceLinuxImage; private NetworkInterface.DefinitionStages.WithPrimaryPrivateIP nicDefinitionWithPrivateIp; private NetworkInterface.DefinitionStages.WithPrimaryNetworkSubnet nicDefinitionWithSubnet; private NetworkInterface.DefinitionStages.WithCreate nicDefinitionWithCreate; private VirtualMachineExtensionsImpl virtualMachineExtensions; private boolean isUnmanagedDiskSelected; private List<VirtualMachineUnmanagedDataDisk> unmanagedDataDisks; private final ManagedDataDiskCollection managedDataDisks; private final BootDiagnosticsHandler bootDiagnosticsHandler; private VirtualMachineMsiHandler virtualMachineMsiHandler; private PublicIpAddress.DefinitionStages.WithCreate implicitPipCreatable; private String newProximityPlacementGroupName; private ProximityPlacementGroupType newProximityPlacementGroupType; private boolean removeOsProfile; VirtualMachineUpdateInner updateParameterSnapshotOnUpdate; private static final SerializerAdapter SERIALIZER_ADAPTER = SerializerFactory.createDefaultManagementSerializerAdapter(); private final ObjectMapper mapper; private static final JacksonAnnotationIntrospector ANNOTATION_INTROSPECTOR = new JacksonAnnotationIntrospector() { @Override public JsonProperty.Access findPropertyAccess(Annotated annotated) { JsonProperty.Access access = super.findPropertyAccess(annotated); if (access == JsonProperty.Access.WRITE_ONLY) { return JsonProperty.Access.AUTO; } return access; } }; VirtualMachineImpl( String name, VirtualMachineInner innerModel, final ComputeManager computeManager, final StorageManager storageManager, final NetworkManager networkManager, final AuthorizationManager authorizationManager) { super(name, innerModel, computeManager); this.storageManager = storageManager; this.networkManager = networkManager; this.authorizationManager = authorizationManager; this.vmName = name; this.isMarketplaceLinuxImage = false; this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.vmName); this.creatableSecondaryNetworkInterfaceKeys = new ArrayList<>(); this.existingSecondaryNetworkInterfacesToAssociate = new ArrayList<>(); this.virtualMachineExtensions = new VirtualMachineExtensionsImpl(computeManager.serviceClient().getVirtualMachineExtensions(), this); 
this.managedDataDisks = new ManagedDataDiskCollection(this); initializeDataDisks(); this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this); this.virtualMachineMsiHandler = new VirtualMachineMsiHandler(authorizationManager, this); this.newProximityPlacementGroupName = null; this.newProximityPlacementGroupType = null; this.mapper = new ObjectMapper(); this.mapper.setAnnotationIntrospector(ANNOTATION_INTROSPECTOR); } @Override public VirtualMachineImpl update() { updateParameterSnapshotOnUpdate = this.deepCopyInnerToUpdateParameter(); return super.update(); }; @Override public Mono<VirtualMachine> refreshAsync() { return super .refreshAsync() .map( virtualMachine -> { reset(virtualMachine.innerModel()); virtualMachineExtensions.refresh(); return virtualMachine; }); } @Override protected Mono<VirtualMachineInner> getInnerAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } @Override public void deallocate() { this.deallocateAsync().block(); } @Override public Mono<Void> deallocateAsync() { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name()) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void deallocate(boolean hibernate) { this.deallocateAsync(hibernate).block(); } @Override public Mono<Void> deallocateAsync(boolean hibernate) { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name(), hibernate) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void generalize() { this.generalizeAsync().block(); } @Override public Mono<Void> generalizeAsync() { return this .manager() .serviceClient() .getVirtualMachines() .generalizeAsync(this.resourceGroupName(), this.name()); } @Override public void powerOff() { this.powerOffAsync().block(); } @Override public Mono<Void> powerOffAsync() { return this .manager() .serviceClient() .getVirtualMachines() .powerOffAsync(this.resourceGroupName(), this.name(), null); } @Override public void restart() { this.restartAsync().block(); } @Override public Mono<Void> restartAsync() { return this.manager().serviceClient().getVirtualMachines().restartAsync(this.resourceGroupName(), this.name()); } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return this.manager().serviceClient().getVirtualMachines().startAsync(this.resourceGroupName(), this.name()); } @Override public void redeploy() { this.redeployAsync().block(); } @Override public Mono<Void> redeployAsync() { return this.manager().serviceClient().getVirtualMachines().redeployAsync(this.resourceGroupName(), this.name()); } @Override public void simulateEviction() { this.simulateEvictionAsync().block(); } @Override public Mono<Void> simulateEvictionAsync() { return this .manager() .serviceClient() .getVirtualMachines() .simulateEvictionAsync(this.resourceGroupName(), this.name()); } @Override public void convertToManaged() { this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisks(this.resourceGroupName(), this.name()); this.refresh(); } @Override public Mono<Void> convertToManagedAsync() { return this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisksAsync(this.resourceGroupName(), this.name()) .flatMap(aVoid -> refreshAsync()) .then(); } @Override public VirtualMachineEncryption diskEncryption() { return new VirtualMachineEncryptionImpl(this); } @Override public 
PagedIterable<VirtualMachineSize> availableSizes() { return PagedConverter.mapPage(this .manager() .serviceClient() .getVirtualMachines() .listAvailableSizes(this.resourceGroupName(), this.name()), VirtualMachineSizeImpl::new); } @Override public String capture(String containerName, String vhdPrefix, boolean overwriteVhd) { return this.captureAsync(containerName, vhdPrefix, overwriteVhd).block(); } @Override public Mono<String> captureAsync(String containerName, String vhdPrefix, boolean overwriteVhd) { VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters(); parameters.withDestinationContainerName(containerName); parameters.withOverwriteVhds(overwriteVhd); parameters.withVhdPrefix(vhdPrefix); return this .manager() .serviceClient() .getVirtualMachines() .captureAsync(this.resourceGroupName(), this.name(), parameters) .map( captureResultInner -> { try { return mapper.writeValueAsString(captureResultInner); } catch (JsonProcessingException ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } }); } @Override public VirtualMachineInstanceView refreshInstanceView() { return refreshInstanceViewAsync().block(); } @Override public Mono<VirtualMachineInstanceView> refreshInstanceViewAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupAsync(this.resourceGroupName(), this.name(), InstanceViewTypes.INSTANCE_VIEW) .map( inner -> { virtualMachineInstanceView = new VirtualMachineInstanceViewImpl(inner.instanceView()); return virtualMachineInstanceView; }) .switchIfEmpty( Mono .defer( () -> { virtualMachineInstanceView = null; return Mono.empty(); })); } @Override public RunCommandResult runPowerShellScript( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runPowerShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runShellScript(List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runCommand(RunCommandInput inputCommand) { return this.manager().virtualMachines().runCommand(this.resourceGroupName(), this.name(), inputCommand); } @Override public Mono<RunCommandResult> runCommandAsync(RunCommandInput inputCommand) { return this.manager().virtualMachines().runCommandAsync(this.resourceGroupName(), this.name(), inputCommand); } @Override public VirtualMachineImpl withNewPrimaryNetwork(Creatable<Network> creatable) { this.nicDefinitionWithPrivateIp = this.preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)).withNewPrimaryNetwork(creatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetwork(String addressSpace) { this.nicDefinitionWithPrivateIp = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) 
.withNewPrimaryNetwork(addressSpace); return this; } @Override public VirtualMachineImpl withExistingPrimaryNetwork(Network network) { this.nicDefinitionWithSubnet = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) .withExistingPrimaryNetwork(network); return this; } @Override public VirtualMachineImpl withSubnet(String name) { this.nicDefinitionWithPrivateIp = this.nicDefinitionWithSubnet.withSubnet(name); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressDynamic() { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressDynamic(); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressStatic(String staticPrivateIPAddress) { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressStatic(staticPrivateIPAddress); return this; } @Override public VirtualMachineImpl withNewPrimaryPublicIPAddress(Creatable<PublicIpAddress> creatable) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(creatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel) { PublicIpAddress.DefinitionStages.WithGroup definitionWithGroup = this .networkManager .publicIpAddresses() .define(this.namer.getRandomName("pip", 15)) .withRegion(this.regionName()); PublicIpAddress.DefinitionStages.WithCreate definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } this.implicitPipCreatable = definitionAfterGroup.withLeafDomainLabel(leafDnsLabel); Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(this.implicitPipCreatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withExistingPrimaryPublicIPAddress(PublicIpAddress publicIPAddress) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withExistingPrimaryPublicIPAddress(publicIPAddress); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withoutPrimaryPublicIPAddress() { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate; this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetworkInterface(Creatable<NetworkInterface> creatable) { this.creatablePrimaryNetworkInterfaceKey = this.addDependency(creatable); return this; } public VirtualMachineImpl withNewPrimaryNetworkInterface(String name, String publicDnsNameLabel) { Creatable<NetworkInterface> definitionCreatable = prepareNetworkInterface(name).withNewPrimaryPublicIPAddress(publicDnsNameLabel); return withNewPrimaryNetworkInterface(definitionCreatable); } @Override public VirtualMachineImpl withExistingPrimaryNetworkInterface(NetworkInterface networkInterface) { this.existingPrimaryNetworkInterfaceToAssociate = networkInterface; return this; } @Override public VirtualMachineImpl withStoredWindowsImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); 
this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withStoredLinuxImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); return this; } @Override public VirtualMachineImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) { return withSpecificWindowsImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) { return withSpecificLinuxImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withSpecificWindowsImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecificLinuxImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withLatestWindowsImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificWindowsImageVersion(imageReference); } @Override public VirtualMachineImpl withLatestLinuxImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificLinuxImageVersion(imageReference); } @Override public VirtualMachineImpl withGeneralizedWindowsCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); 
this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecializedWindowsCustomImage(String customImageId) { this.withGeneralizedWindowsCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withGeneralizedLinuxCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withSpecializedLinuxCustomImage(String customImageId) { this.withGeneralizedLinuxCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedOSUnmanagedDisk(String osDiskUrl, OperatingSystemTypes osType) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(osDiskUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); this.innerModel().storageProfile().osDisk().withVhd(osVhd); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withManagedDisk(null); return this; } @Override public VirtualMachineImpl withSpecializedOSDisk(Disk disk, OperatingSystemTypes osType) { ManagedDiskParameters diskParametersInner = new ManagedDiskParameters(); diskParametersInner.withId(disk.id()); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); this.innerModel().storageProfile().osDisk().withManagedDisk(diskParametersInner); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withVhd(null); return this; } @Override public VirtualMachineImpl withRootUsername(String rootUserName) { this.innerModel().osProfile().withAdminUsername(rootUserName); return this; } @Override public VirtualMachineImpl withAdminUsername(String adminUserName) { this.innerModel().osProfile().withAdminUsername(adminUserName); return this; } @Override public VirtualMachineImpl withSsh(String publicKeyData) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration().ssh() == null) { SshConfiguration sshConfiguration = new SshConfiguration(); sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>()); osProfile.linuxConfiguration().withSsh(sshConfiguration); } SshPublicKey sshPublicKey = new SshPublicKey(); sshPublicKey.withKeyData(publicKeyData); sshPublicKey.withPath("/home/" + osProfile.adminUsername() + 
"/.ssh/authorized_keys"); osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey); return this; } @Override public VirtualMachineImpl withoutVMAgent() { this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(false); return this; } @Override public VirtualMachineImpl withoutAutoUpdate() { this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false); return this; } @Override public VirtualMachineImpl withTimeZone(String timeZone) { this.innerModel().osProfile().windowsConfiguration().withTimeZone(timeZone); return this; } @Override public VirtualMachineImpl withWinRM(WinRMListener listener) { if (this.innerModel().osProfile().windowsConfiguration().winRM() == null) { WinRMConfiguration winRMConfiguration = new WinRMConfiguration(); this.innerModel().osProfile().windowsConfiguration().withWinRM(winRMConfiguration); } this.innerModel().osProfile().windowsConfiguration().winRM().listeners().add(listener); return this; } @Override public VirtualMachineImpl withRootPassword(String password) { this.innerModel().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineImpl withAdminPassword(String password) { this.innerModel().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineImpl withCustomData(String base64EncodedCustomData) { this.innerModel().osProfile().withCustomData(base64EncodedCustomData); return this; } @Override public VirtualMachineImpl withComputerName(String computerName) { this.innerModel().osProfile().withComputerName(computerName); return this; } @Override public VirtualMachineImpl withSize(String sizeName) { this.innerModel().hardwareProfile().withVmSize(VirtualMachineSizeTypes.fromString(sizeName)); return this; } @Override public VirtualMachineImpl withSize(VirtualMachineSizeTypes size) { this.innerModel().hardwareProfile().withVmSize(size); return this; } @Override public VirtualMachineImpl withOSDiskCaching(CachingTypes cachingType) { this.innerModel().storageProfile().osDisk().withCaching(cachingType); return this; } @Override public VirtualMachineImpl withOSDiskVhdLocation(String containerName, String vhdName) { if (isManagedDiskEnabled()) { return this; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!this.isOSDiskFromImage(osDisk)) { return this; } if (this.isOsDiskFromCustomImage(storageProfile)) { return this; } if (this.isOSDiskFromPlatformImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(temporaryBlobUrl(containerName, vhdName)); this.innerModel().storageProfile().osDisk().withVhd(osVhd); return this; } if (this.isOSDiskFromStoredImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); try { URL sourceCustomImageUrl = new URL(osDisk.image().uri()); URL destinationVhdUrl = new URL( sourceCustomImageUrl.getProtocol(), sourceCustomImageUrl.getHost(), "/" + containerName + "/" + vhdName); osVhd.withUri(destinationVhdUrl.toString()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new RuntimeException(ex)); } this.innerModel().storageProfile().osDisk().withVhd(osVhd); } return this; } @Override public VirtualMachineImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) { if (this.innerModel().storageProfile().osDisk().managedDisk() == null) { this.innerModel().storageProfile().osDisk().withManagedDisk(new ManagedDiskParameters()); } 
this.innerModel().storageProfile().osDisk().managedDisk().withStorageAccountType(accountType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultCachingType(CachingTypes cachingType) { this.managedDataDisks.setDefaultCachingType(cachingType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) { this.managedDataDisks.setDefaultStorageAccountType(storageAccountType); return this; } @Override public VirtualMachineImpl withOSDiskEncryptionSettings(DiskEncryptionSettings settings) { this.innerModel().storageProfile().osDisk().withEncryptionSettings(settings); return this; } @Override public VirtualMachineImpl withOSDiskSizeInGB(int size) { this.innerModel().storageProfile().osDisk().withDiskSizeGB(size); return this; } @Override public VirtualMachineImpl withOSDiskName(String name) { this.innerModel().storageProfile().osDisk().withName(name); return this; } @Override public UnmanagedDataDiskImpl defineUnmanagedDataDisk(String name) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return UnmanagedDataDiskImpl.prepareDataDisk(name, this); } @Override public VirtualMachineImpl withNewUnmanagedDataDisk(Integer sizeInGB) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return defineUnmanagedDataDisk(null).withNewVhd(sizeInGB).attach(); } @Override public VirtualMachineImpl withExistingUnmanagedDataDisk( String storageAccountName, String containerName, String vhdName) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return defineUnmanagedDataDisk(null).withExistingVhd(storageAccountName, containerName, vhdName).attach(); } @Override public VirtualMachineImpl withoutUnmanagedDataDisk(String name) { int idx = -1; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { idx++; if (dataDisk.name().equalsIgnoreCase(name)) { this.unmanagedDataDisks.remove(idx); this.innerModel().storageProfile().dataDisks().remove(idx); break; } } return this; } @Override public VirtualMachineImpl withoutUnmanagedDataDisk(int lun) { int idx = -1; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { idx++; if (dataDisk.lun() == lun) { this.unmanagedDataDisks.remove(idx); this.innerModel().storageProfile().dataDisks().remove(idx); break; } } return this; } @Override public UnmanagedDataDiskImpl updateUnmanagedDataDisk(String name) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_NO_UNMANAGED_DISK_TO_UPDATE); for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.name().equalsIgnoreCase(name)) { return (UnmanagedDataDiskImpl) dataDisk; } } throw logger.logExceptionAsError(new RuntimeException("A data disk with name '" + name + "' not found")); } @Override public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this.managedDataDisks.newDisksToAttach.put(this.addDependency(creatable), new DataDisk().withLun(-1)); return this; } @Override public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .newDisksToAttach .put(this.addDependency(creatable), new DataDisk().withLun(lun).withCaching(cachingType)); 
return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this.managedDataDisks.implicitDisksToAssociate.add(new DataDisk().withLun(-1).withDiskSizeGB(sizeInGB)); return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .implicitDisksToAssociate .add(new DataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDisk( int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(lun) .withDiskSizeGB(sizeInGB) .withCaching(cachingType) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add(new DataDisk().withLun(-1).withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add(new DataDisk().withLun(lun).withManagedDisk(managedDiskParameters).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk, int newSizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add( new DataDisk() .withLun(lun) .withDiskSizeGB(newSizeInGB) .withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage(int imageLun) { this.managedDataDisks.newDisksFromImage.add(new DataDisk().withLun(imageLun)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage(int imageLun, int newSizeInGB, CachingTypes cachingType) { this .managedDataDisks .newDisksFromImage .add(new DataDisk().withLun(imageLun).withDiskSizeGB(newSizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) { ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .newDisksFromImage .add( new DataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) 
.withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withoutDataDisk(int lun) { if (!isManagedDiskEnabled()) { return this; } this.managedDataDisks.diskLunsToRemove.add(lun); return this; } @Override public VirtualMachineImpl withNewStorageAccount(Creatable<StorageAccount> creatable) { if (this.creatableStorageAccountKey == null) { this.creatableStorageAccountKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withNewStorageAccount(String name) { StorageAccount.DefinitionStages.WithGroup definitionWithGroup = this.storageManager.storageAccounts().define(name).withRegion(this.regionName()); Creatable<StorageAccount> definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return withNewStorageAccount(definitionAfterGroup); } @Override public VirtualMachineImpl withExistingStorageAccount(StorageAccount storageAccount) { this.existingStorageAccountToAssociate = storageAccount; return this; } @Override public VirtualMachineImpl withNewAvailabilitySet(Creatable<AvailabilitySet> creatable) { if (this.creatableAvailabilitySetKey == null) { this.creatableAvailabilitySetKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withProximityPlacementGroup(String proximityPlacementGroupId) { this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId)); newProximityPlacementGroupName = null; return this; } @Override public VirtualMachineImpl withNewProximityPlacementGroup( String proximityPlacementGroupName, ProximityPlacementGroupType type) { this.newProximityPlacementGroupName = proximityPlacementGroupName; this.newProximityPlacementGroupType = type; this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withoutProximityPlacementGroup() { this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withNewAvailabilitySet(String name) { AvailabilitySet.DefinitionStages.WithGroup definitionWithGroup = super.myManager.availabilitySets().define(name).withRegion(this.regionName()); AvailabilitySet.DefinitionStages.WithSku definitionWithSku; if (this.creatableGroup != null) { definitionWithSku = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithSku = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } Creatable<AvailabilitySet> creatable; if (isManagedDiskEnabled()) { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.ALIGNED); } else { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.CLASSIC); } return withNewAvailabilitySet(creatable); } @Override public VirtualMachineImpl withExistingAvailabilitySet(AvailabilitySet availabilitySet) { this.existingAvailabilitySetToAssociate = availabilitySet; return this; } @Override public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable) { this.creatableSecondaryNetworkInterfaceKeys.add(this.addDependency(creatable)); return this; } @Override public VirtualMachineImpl withExistingSecondaryNetworkInterface(NetworkInterface networkInterface) { this.existingSecondaryNetworkInterfacesToAssociate.add(networkInterface); return this; } @Override public VirtualMachineExtensionImpl 
defineNewExtension(String name) { return this.virtualMachineExtensions.define(name); } @Override public VirtualMachineImpl withoutSecondaryNetworkInterface(String name) { if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { int idx = -1; for (NetworkInterfaceReference nicReference : this.innerModel().networkProfile().networkInterfaces()) { idx++; if (!nicReference.primary() && name.equalsIgnoreCase(ResourceUtils.nameFromResourceId(nicReference.id()))) { this.innerModel().networkProfile().networkInterfaces().remove(idx); break; } } } return this; } @Override public VirtualMachineExtensionImpl updateExtension(String name) { return this.virtualMachineExtensions.update(name); } @Override public VirtualMachineImpl withoutExtension(String name) { this.virtualMachineExtensions.remove(name); return this; } @Override public VirtualMachineImpl withPlan(PurchasePlan plan) { this.innerModel().withPlan(new Plan()); this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name()); return this; } @Override public VirtualMachineImpl withPromotionalPlan(PurchasePlan plan, String promotionCode) { this.withPlan(plan); this.innerModel().plan().withPromotionCode(promotionCode); return this; } @Override public VirtualMachineImpl withUnmanagedDisks() { this.isUnmanagedDiskSelected = true; return this; } @Override public VirtualMachineImpl withBootDiagnosticsOnManagedStorageAccount() { this.bootDiagnosticsHandler.withBootDiagnostics(true); return this; } @Override public VirtualMachineImpl withBootDiagnostics() { this.bootDiagnosticsHandler.withBootDiagnostics(false); return this; } @Override public VirtualMachineImpl withBootDiagnostics(Creatable<StorageAccount> creatable) { this.bootDiagnosticsHandler.withBootDiagnostics(creatable); return this; } @Override public VirtualMachineImpl withBootDiagnostics(String storageAccountBlobEndpointUri) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri); return this; } @Override public VirtualMachineImpl withBootDiagnostics(StorageAccount storageAccount) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount); return this; } @Override public VirtualMachineImpl withoutBootDiagnostics() { this.bootDiagnosticsHandler.withoutBootDiagnostics(); return this; } @Override public VirtualMachineImpl withPriority(VirtualMachinePriorityTypes priority) { this.innerModel().withPriority(priority); return this; } @Override public VirtualMachineImpl withLowPriority() { this.withPriority(VirtualMachinePriorityTypes.LOW); return this; } @Override public VirtualMachineImpl withLowPriority(VirtualMachineEvictionPolicyTypes policy) { this.withLowPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withSpotPriority() { this.withPriority(VirtualMachinePriorityTypes.SPOT); return this; } @Override public VirtualMachineImpl withSpotPriority(VirtualMachineEvictionPolicyTypes policy) { this.withSpotPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withMaxPrice(Double maxPrice) { this.innerModel().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice)); return this; } @Override public VirtualMachineImpl withSystemAssignedManagedServiceIdentity() { this.virtualMachineMsiHandler.withLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withoutSystemAssignedManagedServiceIdentity() { 
this.virtualMachineMsiHandler.withoutLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) { this.virtualMachineMsiHandler.withAccessTo(resourceId, role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole role) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessTo(resourceId, roleDefinitionId); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId); return this; } @Override public VirtualMachineImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) { this.virtualMachineMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity); return this; } @Override public VirtualMachineImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) { this.virtualMachineMsiHandler.withExistingExternalManagedServiceIdentity(identity); return this; } @Override public VirtualMachineImpl withoutUserAssignedManagedServiceIdentity(String identityId) { this.virtualMachineMsiHandler.withoutExternalManagedServiceIdentity(identityId); return this; } @Override public VirtualMachineImpl withLicenseType(String licenseType) { innerModel().withLicenseType(licenseType); return this; } @Override public VirtualMachineImpl enableHibernation() { if (this.innerModel().additionalCapabilities() == null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); } this.innerModel().additionalCapabilities().withHibernationEnabled(true); return this; } @Override public VirtualMachineImpl disableHibernation() { if (this.innerModel().additionalCapabilities() == null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); } this.innerModel().additionalCapabilities().withHibernationEnabled(false); return this; } @Override public boolean isManagedDiskEnabled() { if (isOsDiskFromCustomImage(this.innerModel().storageProfile())) { return true; } if (isOSDiskAttachedManaged(this.innerModel().storageProfile().osDisk())) { return true; } if (isOSDiskFromStoredImage(this.innerModel().storageProfile())) { return false; } if (isOSDiskAttachedUnmanaged(this.innerModel().storageProfile().osDisk())) { return false; } if (isOSDiskFromPlatformImage(this.innerModel().storageProfile())) { if (this.isUnmanagedDiskSelected) { return false; } } if (isInCreateMode()) { return true; } else { return this.innerModel().storageProfile().osDisk().vhd() == null; } } @Override public String computerName() { if (innerModel().osProfile() == null) { return null; } return innerModel().osProfile().computerName(); } @Override public VirtualMachineSizeTypes size() { return innerModel().hardwareProfile().vmSize(); } @Override public OperatingSystemTypes osType() { if (innerModel().storageProfile().osDisk().osType() != null) { return innerModel().storageProfile().osDisk().osType(); } if (innerModel().osProfile() != null) { if (innerModel().osProfile().linuxConfiguration() != null) { return OperatingSystemTypes.LINUX; } if (innerModel().osProfile().windowsConfiguration() != null) { return OperatingSystemTypes.WINDOWS; } } return null; } @Override public 
String osUnmanagedDiskVhdUri() { if (isManagedDiskEnabled() || this.storageProfile().osDisk().vhd() == null) { return null; } return innerModel().storageProfile().osDisk().vhd().uri(); } @Override public CachingTypes osDiskCachingType() { return innerModel().storageProfile().osDisk().caching(); } @Override public int osDiskSize() { return ResourceManagerUtils.toPrimitiveInt(innerModel().storageProfile().osDisk().diskSizeGB()); } @Override public StorageAccountTypes osDiskStorageAccountType() { if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) { return null; } return this.storageProfile().osDisk().managedDisk().storageAccountType(); } @Override public String osDiskId() { if (!isManagedDiskEnabled()) { return null; } return this.storageProfile().osDisk().managedDisk().id(); } @Override public Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks() { Map<Integer, VirtualMachineUnmanagedDataDisk> dataDisks = new HashMap<>(); if (!isManagedDiskEnabled()) { for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { dataDisks.put(dataDisk.lun(), dataDisk); } } return Collections.unmodifiableMap(dataDisks); } @Override public Map<Integer, VirtualMachineDataDisk> dataDisks() { Map<Integer, VirtualMachineDataDisk> dataDisks = new HashMap<>(); if (isManagedDiskEnabled()) { List<DataDisk> innerDataDisks = this.innerModel().storageProfile().dataDisks(); if (innerDataDisks != null) { for (DataDisk innerDataDisk : innerDataDisks) { dataDisks.put(innerDataDisk.lun(), new VirtualMachineDataDiskImpl(innerDataDisk)); } } } return Collections.unmodifiableMap(dataDisks); } @Override public NetworkInterface getPrimaryNetworkInterface() { return this.getPrimaryNetworkInterfaceAsync().block(); } @Override public Mono<NetworkInterface> getPrimaryNetworkInterfaceAsync() { return this.networkManager.networkInterfaces().getByIdAsync(primaryNetworkInterfaceId()); } @Override public PublicIpAddress getPrimaryPublicIPAddress() { return this.getPrimaryNetworkInterface().primaryIPConfiguration().getPublicIpAddress(); } @Override public String getPrimaryPublicIPAddressId() { return this.getPrimaryNetworkInterface().primaryIPConfiguration().publicIpAddressId(); } @Override public List<String> networkInterfaceIds() { List<String> nicIds = new ArrayList<>(); for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) { nicIds.add(nicRef.id()); } return nicIds; } @Override public String primaryNetworkInterfaceId() { final List<NetworkInterfaceReference> nicRefs = this.innerModel().networkProfile().networkInterfaces(); String primaryNicRefId = null; if (nicRefs.size() == 1) { primaryNicRefId = nicRefs.get(0).id(); } else if (nicRefs.size() == 0) { primaryNicRefId = null; } else { for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) { if (nicRef.primary() != null && nicRef.primary()) { primaryNicRefId = nicRef.id(); break; } } if (primaryNicRefId == null) { primaryNicRefId = nicRefs.get(0).id(); } } return primaryNicRefId; } @Override public String availabilitySetId() { if (innerModel().availabilitySet() != null) { return innerModel().availabilitySet().id(); } return null; } @Override public String provisioningState() { return innerModel().provisioningState(); } @Override public String licenseType() { return innerModel().licenseType(); } @Override public ProximityPlacementGroup proximityPlacementGroup() { if (innerModel().proximityPlacementGroup() == null) { return null; } else { ResourceId id = 
ResourceId.fromString(innerModel().proximityPlacementGroup().id()); ProximityPlacementGroupInner plgInner = manager() .serviceClient() .getProximityPlacementGroups() .getByResourceGroup(id.resourceGroupName(), id.name()); if (plgInner == null) { return null; } else { return new ProximityPlacementGroupImpl(plgInner); } } } @Override public Mono<List<VirtualMachineExtension>> listExtensionsAsync() { return this.virtualMachineExtensions.listAsync(); } @Override public Map<String, VirtualMachineExtension> listExtensions() { return this.virtualMachineExtensions.asMap(); } @Override public Plan plan() { return innerModel().plan(); } @Override public StorageProfile storageProfile() { return innerModel().storageProfile(); } @Override public OSProfile osProfile() { return innerModel().osProfile(); } @Override public DiagnosticsProfile diagnosticsProfile() { return innerModel().diagnosticsProfile(); } @Override public String vmId() { return innerModel().vmId(); } @Override public VirtualMachineInstanceView instanceView() { if (this.virtualMachineInstanceView == null) { this.refreshInstanceView(); } return this.virtualMachineInstanceView; } @Override public Set<AvailabilityZoneId> availabilityZones() { Set<AvailabilityZoneId> zones = new HashSet<>(); if (this.innerModel().zones() != null) { for (String zone : this.innerModel().zones()) { zones.add(AvailabilityZoneId.fromString(zone)); } } return Collections.unmodifiableSet(zones); } @Override public PowerState powerState() { return PowerState.fromInstanceView(this.instanceView()); } @Override public boolean isBootDiagnosticsEnabled() { return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled(); } @Override public String bootDiagnosticsStorageUri() { return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri(); } @Override public boolean isManagedServiceIdentityEnabled() { ResourceIdentityType type = this.managedServiceIdentityType(); return type != null && !type.equals(ResourceIdentityType.NONE); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().tenantId(); } return null; } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().principalId(); } return null; } @Override public ResourceIdentityType managedServiceIdentityType() { if (this.innerModel().identity() != null) { return this.innerModel().identity().type(); } return null; } @Override public Set<String> userAssignedManagedServiceIdentityIds() { if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) { return Collections .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet())); } return Collections.unmodifiableSet(new HashSet<String>()); } @Override public BillingProfile billingProfile() { return this.innerModel().billingProfile(); } @Override public boolean isHibernationEnabled() { return this.innerModel().additionalCapabilities() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().additionalCapabilities().hibernationEnabled()); } @Override public VirtualMachinePriorityTypes priority() { return this.innerModel().priority(); } @Override public VirtualMachineEvictionPolicyTypes evictionPolicy() { return this.innerModel().evictionPolicy(); } @Override public void beforeGroupCreateOrUpdate() { if (creatableStorageAccountKey == null && existingStorageAccountToAssociate == null) { if 
(osDiskRequiresImplicitStorageAccountCreation() || dataDisksRequiresImplicitStorageAccountCreation()) { Creatable<StorageAccount> storageAccountCreatable = null; if (this.creatableGroup != null) { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withNewResourceGroup(this.creatableGroup); } else { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withExistingResourceGroup(this.resourceGroupName()); } this.creatableStorageAccountKey = this.addDependency(storageAccountCreatable); } } this.bootDiagnosticsHandler.prepare(); } @Override public Mono<VirtualMachine> createResourceAsync() { return prepareCreateResourceAsync() .flatMap( virtualMachine -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateAsync(resourceGroupName(), vmName, innerModel()) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; })); } private Mono<VirtualMachine> prepareCreateResourceAsync() { setOSDiskDefaults(); setOSProfileDefaults(); setHardwareProfileDefaults(); if (isManagedDiskEnabled()) { managedDataDisks.setDataDisksDefaults(); } else { UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName); } this.handleUnManagedOSAndDataDisksStorageSettings(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.handleNetworkSettings(); return this .createNewProximityPlacementGroupAsync() .map( virtualMachine -> { this.handleAvailabilitySettings(); this.virtualMachineMsiHandler.processCreatedExternalIdentities(); this.virtualMachineMsiHandler.handleExternalIdentities(); return virtualMachine; }); } public Accepted<VirtualMachine> beginCreate() { return AcceptedImpl .<VirtualMachine, VirtualMachineInner>newAccepted( logger, this.manager().serviceClient().getHttpPipeline(), this.manager().serviceClient().getDefaultPollInterval(), () -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel()) .block(), inner -> new VirtualMachineImpl( inner.name(), inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager), VirtualMachineInner.class, () -> { Flux<Indexable> dependencyTasksAsync = taskGroup().invokeDependencyAsync(taskGroup().newInvocationContext()); dependencyTasksAsync.blockLast(); prepareCreateResourceAsync().block(); }, this::reset, Context.NONE); } @Override public Mono<VirtualMachine> updateResourceAsync() { if (isManagedDiskEnabled()) { managedDataDisks.setDataDisksDefaults(); } else { UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName); } this.handleUnManagedOSAndDataDisksStorageSettings(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.handleNetworkSettings(); this.handleAvailabilitySettings(); this.virtualMachineMsiHandler.processCreatedExternalIdentities(); VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner(); this.copyInnerToUpdateParameter(updateParameter); this.virtualMachineMsiHandler.handleExternalIdentities(updateParameter); final boolean vmModified = this.isVirtualMachineModifiedDuringUpdate(updateParameter); if (vmModified) { return this .manager() .serviceClient() .getVirtualMachines() .updateAsync(resourceGroupName(), vmName, updateParameter) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; }); } else { return Mono.just(this); } } 
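Aside on the update path that ends just above: updateResourceAsync only issues the PATCH when isVirtualMachineModifiedDuringUpdate detects a real change; update() captures a deep-copy snapshot of the update payload, and before the service call the current payload is re-serialized and compared with that snapshot as JSON strings. Below is a minimal, self-contained sketch of that snapshot-and-compare idea; the real code uses azure-core's SerializerAdapter, whereas this sketch uses Jackson's ObjectMapper, and the Payload type is a hypothetical stand-in for VirtualMachineUpdateInner.

import com.fasterxml.jackson.databind.ObjectMapper;

public final class SnapshotDiffSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Hypothetical stand-in for VirtualMachineUpdateInner.
    public static final class Payload {
        public String licenseType;
        public Integer osDiskSizeGb;
    }

    // Mirrors isVirtualMachineModifiedDuringUpdate: treat a missing snapshot or a
    // serialization failure as "modified", so the update is still sent in doubtful cases.
    static boolean isModified(Payload snapshot, Payload current) {
        if (snapshot == null || current == null) {
            return true;
        }
        try {
            return !MAPPER.writeValueAsString(current).equals(MAPPER.writeValueAsString(snapshot));
        } catch (Exception e) {
            return true;
        }
    }

    public static void main(String[] args) throws Exception {
        Payload snapshot = new Payload();
        snapshot.licenseType = "Windows_Server";
        // Deep copy via serialize/deserialize, mirroring deepCopyInnerToUpdateParameter().
        Payload current = MAPPER.readValue(MAPPER.writeValueAsString(snapshot), Payload.class);
        System.out.println(isModified(snapshot, current)); // false -> the PATCH would be skipped
        current.osDiskSizeGb = 256;
        System.out.println(isModified(snapshot, current)); // true  -> the PATCH would be sent
    }
}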
@Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { this.virtualMachineExtensions.clear(); if (isGroupFaulted) { return Mono.empty(); } else { return this.refreshAsync().then(); } } VirtualMachineImpl withExtension(VirtualMachineExtensionImpl extension) { this.virtualMachineExtensions.addExtension(extension); return this; } private void reset(VirtualMachineInner inner) { this.setInner(inner); clearCachedRelatedResources(); initializeDataDisks(); virtualMachineMsiHandler.clear(); } VirtualMachineImpl withUnmanagedDataDisk(UnmanagedDataDiskImpl dataDisk) { this.innerModel().storageProfile().dataDisks().add(dataDisk.innerModel()); this.unmanagedDataDisks.add(dataDisk); return this; } @Override public VirtualMachineImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (isInCreateMode()) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<String>()); } this.innerModel().zones().add(zoneId.toString()); if (this.implicitPipCreatable != null) { this.implicitPipCreatable.withAvailabilityZone(zoneId); } } return this; } AzureEnvironment environment() { return manager().environment(); } private void setOSDiskDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (isOSDiskFromImage(osDisk)) { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() == null) { osDisk.withManagedDisk(new ManagedDiskParameters()); } if (osDisk.managedDisk().storageAccountType() == null) { osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS); } osDisk.withVhd(null); } else { if (isOSDiskFromPlatformImage(storageProfile) || isOSDiskFromStoredImage(storageProfile)) { if (osDisk.vhd() == null) { String osDiskVhdContainerName = "vhds"; String osDiskVhdName = this.vmName + "-os-disk-" + UUID.randomUUID().toString() + ".vhd"; withOSDiskVhdLocation(osDiskVhdContainerName, osDiskVhdName); } osDisk.withManagedDisk(null); } if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } else { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() != null) { osDisk.managedDisk().withStorageAccountType(null); } osDisk.withVhd(null); } else { osDisk.withManagedDisk(null); if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } if (osDisk.caching() == null) { withOSDiskCaching(CachingTypes.READ_WRITE); } } private void setOSProfileDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!removeOsProfile && isOSDiskFromImage(osDisk)) { if (osDisk.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration() == null) { osProfile.withLinuxConfiguration(new LinuxConfiguration()); } this .innerModel() .osProfile() .linuxConfiguration() .withDisablePasswordAuthentication(osProfile.adminPassword() == null); } if (this.innerModel().osProfile().computerName() == null) { if (vmName.matches("[0-9]+")) { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } else if (vmName.length() <= 15) { this.innerModel().osProfile().withComputerName(vmName); } else { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } } } else { this.innerModel().withOsProfile(null); } } private void setHardwareProfileDefaults() { if (!isInCreateMode()) { return; } HardwareProfile hardwareProfile = 
this.innerModel().hardwareProfile(); if (hardwareProfile.vmSize() == null) { hardwareProfile.withVmSize(VirtualMachineSizeTypes.BASIC_A0); } } /** Prepare virtual machine disks profile (StorageProfile). */ private void handleUnManagedOSAndDataDisksStorageSettings() { if (isManagedDiskEnabled()) { return; } StorageAccount storageAccount = null; if (this.creatableStorageAccountKey != null) { storageAccount = this.taskResult(this.creatableStorageAccountKey); } else if (this.existingStorageAccountToAssociate != null) { storageAccount = this.existingStorageAccountToAssociate; } if (isInCreateMode()) { if (storageAccount != null) { if (isOSDiskFromPlatformImage(innerModel().storageProfile())) { String uri = innerModel() .storageProfile() .osDisk() .vhd() .uri() .replaceFirst("\\{storage-base-url}", storageAccount.endPoints().primary().blob()); innerModel().storageProfile().osDisk().vhd().withUri(uri); } UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } } else { if (storageAccount != null) { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } else { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, vmName); } } } private Mono<VirtualMachineImpl> createNewProximityPlacementGroupAsync() { if (isInCreateMode()) { if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) { ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner(); plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType); plgInner.withLocation(this.innerModel().location()); return this .manager() .serviceClient() .getProximityPlacementGroups() .createOrUpdateAsync(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner) .map( createdPlgInner -> { this .innerModel() .withProximityPlacementGroup(new SubResource().withId(createdPlgInner.id())); return this; }); } } return Mono.just(this); } private void handleNetworkSettings() { if (isInCreateMode()) { NetworkInterface primaryNetworkInterface = null; if (this.creatablePrimaryNetworkInterfaceKey != null) { primaryNetworkInterface = this.taskResult(this.creatablePrimaryNetworkInterfaceKey); } else if (this.existingPrimaryNetworkInterfaceToAssociate != null) { primaryNetworkInterface = this.existingPrimaryNetworkInterfaceToAssociate; } if (primaryNetworkInterface != null) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(true); nicReference.withId(primaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } for (String creatableSecondaryNetworkInterfaceKey : this.creatableSecondaryNetworkInterfaceKeys) { NetworkInterface secondaryNetworkInterface = this.taskResult(creatableSecondaryNetworkInterfaceKey); NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } for (NetworkInterface secondaryNetworkInterface : this.existingSecondaryNetworkInterfacesToAssociate) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } private void handleAvailabilitySettings() { if (!isInCreateMode()) { return; } AvailabilitySet availabilitySet = null; if 
(this.creatableAvailabilitySetKey != null) { availabilitySet = this.taskResult(this.creatableAvailabilitySetKey); } else if (this.existingAvailabilitySetToAssociate != null) { availabilitySet = this.existingAvailabilitySetToAssociate; } if (availabilitySet != null) { if (this.innerModel().availabilitySet() == null) { this.innerModel().withAvailabilitySet(new SubResource()); } this.innerModel().availabilitySet().withId(availabilitySet.id()); } } private boolean osDiskRequiresImplicitStorageAccountCreation() { if (isManagedDiskEnabled()) { return false; } if (this.creatableStorageAccountKey != null || this.existingStorageAccountToAssociate != null || !isInCreateMode()) { return false; } return isOSDiskFromPlatformImage(this.innerModel().storageProfile()); } private boolean dataDisksRequiresImplicitStorageAccountCreation() { if (isManagedDiskEnabled()) { return false; } if (this.creatableStorageAccountKey != null || this.existingStorageAccountToAssociate != null || this.unmanagedDataDisks.size() == 0) { return false; } boolean hasEmptyVhd = false; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.creationMethod() == DiskCreateOptionTypes.EMPTY || dataDisk.creationMethod() == DiskCreateOptionTypes.FROM_IMAGE) { if (dataDisk.innerModel().vhd() == null) { hasEmptyVhd = true; break; } } } if (isInCreateMode()) { return hasEmptyVhd; } if (hasEmptyVhd) { for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.creationMethod() == DiskCreateOptionTypes.ATTACH && dataDisk.innerModel().vhd() != null) { return false; } } return true; } return false; } /** * Checks whether the OS disk is directly attached to an unmanaged VHD. * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to an unmanaged VHD, false otherwise */ private boolean isOSDiskAttachedUnmanaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.vhd() != null && osDisk.vhd().uri() != null; } /** * Checks whether the OS disk is directly attached to a managed disk. * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a managed disk, false otherwise */ private boolean isOSDiskAttachedManaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.managedDisk() != null && osDisk.managedDisk().id() != null; } /** * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]). * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is configured to use image from PIR or custom image */ private boolean isOSDiskFromImage(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE; } /** * Checks whether the OS disk is based on a platform image (image in PIR). * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on platform image. */ private boolean isOSDiskFromPlatformImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.publisher() != null && imageReference.offer() != null && imageReference.sku() != null && imageReference.version() != null; } /** * Checks whether the OS disk is based on a CustomImage. * * <p>A custom image is represented by {@link VirtualMachineCustomImage}. 
* * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on custom image. */ private boolean isOsDiskFromCustomImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null; } /** * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature'). * * <p>A stored image is created by calling {@link VirtualMachine#capture(String, String, boolean)}. * * @param storageProfile the storage profile * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature') */ private boolean isOSDiskFromStoredImage(StorageProfile storageProfile) { OSDisk osDisk = storageProfile.osDisk(); return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null; } private String temporaryBlobUrl(String containerName, String blobName) { return "{storage-base-url}" + containerName + "/" + blobName; } private NetworkInterface.DefinitionStages.WithPrimaryPublicIPAddress prepareNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionWithNetwork; if (this.creatableGroup != null) { definitionWithNetwork = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithNetwork = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionWithNetwork.withNewPrimaryNetwork("vnet" + name).withPrimaryPrivateIPAddressDynamic(); } private void initializeDataDisks() { if (this.innerModel().storageProfile().dataDisks() == null) { this.innerModel().storageProfile().withDataDisks(new ArrayList<>()); } this.isUnmanagedDiskSelected = false; this.managedDataDisks.clear(); this.unmanagedDataDisks = new ArrayList<>(); if (!isManagedDiskEnabled()) { for (DataDisk dataDiskInner : this.storageProfile().dataDisks()) { this.unmanagedDataDisks.add(new UnmanagedDataDiskImpl(dataDiskInner, this)); } } } private NetworkInterface.DefinitionStages.WithPrimaryNetwork preparePrimaryNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionAfterGroup; } private void clearCachedRelatedResources() { this.virtualMachineInstanceView = null; } private void throwIfManagedDiskEnabled(String message) { if (this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private void throwIfManagedDiskDisabled(String message) { if (!this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private boolean isInUpdateMode() { return !this.isInCreateMode(); } boolean isVirtualMachineModifiedDuringUpdate(VirtualMachineUpdateInner updateParameter) { if (updateParameterSnapshotOnUpdate == null || updateParameter == null) { return true; } else { try { String jsonStrSnapshot = SERIALIZER_ADAPTER.serialize(updateParameterSnapshotOnUpdate, 
SerializerEncoding.JSON); String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON); return !jsonStr.equals(jsonStrSnapshot); } catch (IOException e) { return true; } } } VirtualMachineUpdateInner deepCopyInnerToUpdateParameter() { VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner(); copyInnerToUpdateParameter(updateParameter); try { String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON); updateParameter = SERIALIZER_ADAPTER.deserialize(jsonStr, VirtualMachineUpdateInner.class, SerializerEncoding.JSON); } catch (IOException e) { return null; } if (this.innerModel().identity() != null) { VirtualMachineIdentity identity = new VirtualMachineIdentity(); identity.withType(this.innerModel().identity().type()); updateParameter.withIdentity(identity); } return updateParameter; } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } /** Class to manage Data disk collection. */ private class ManagedDataDiskCollection { private final Map<String, DataDisk> newDisksToAttach = new HashMap<>(); private final List<DataDisk> existingDisksToAttach = new ArrayList<>(); private final List<DataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<DataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineImpl vm; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; ManagedDataDiskCollection(VirtualMachineImpl vm) { this.vm = vm; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDataDisksDefaults() { VirtualMachineInner vmInner = this.vm.innerModel(); if (isPending()) { if (vmInner.storageProfile().dataDisks() == null) { vmInner.storageProfile().withDataDisks(new ArrayList<>()); } List<DataDisk> dataDisks = vmInner.storageProfile().dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksToAttach.values()) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.existingDisksToAttach) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setAttachableNewDataDisks(nextLun); setAttachableExistingDataDisks(nextLun); setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (vmInner.storageProfile().dataDisks() != null && vmInner.storageProfile().dataDisks().size() == 0) { if (vm.isInCreateMode()) { 
vmInner.storageProfile().withDataDisks(null); } } this.clear(); } private void clear() { newDisksToAttach.clear(); existingDisksToAttach.clear(); implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); } private boolean isPending() { return newDisksToAttach.size() > 0 || existingDisksToAttach.size() > 0 || implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void setAttachableNewDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Map.Entry<String, DataDisk> entry : this.newDisksToAttach.entrySet()) { Disk managedDisk = vm.taskResult(entry.getKey()); DataDisk dataDisk = entry.getValue(); dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } dataDisk.withManagedDisk(new ManagedDiskParameters()); dataDisk.managedDisk().withId(managedDisk.id()); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setAttachableExistingDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.existingDisksToAttach) { dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } } /** Class to manage VM boot diagnostics settings. 
*/ private class BootDiagnosticsHandler { private final VirtualMachineImpl vmImpl; private String creatableDiagnosticsStorageAccountKey; private boolean useManagedStorageAccount = false; BootDiagnosticsHandler(VirtualMachineImpl vmImpl) { this.vmImpl = vmImpl; if (isBootDiagnosticsEnabled() && this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } } public boolean isBootDiagnosticsEnabled() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null && this.vmInner().diagnosticsProfile().bootDiagnostics().enabled() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().enabled(); } return false; } public String bootDiagnosticsStorageUri() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri(); } return null; } BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; } BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(creatable); return this; } BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { this.enableDisable(true); this.useManagedStorageAccount = false; this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(storageAccountBlobEndpointUri); return this; } BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); } BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; } void prepare() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } if (this.creatableDiagnosticsStorageAccountKey != null || this.vmImpl.creatableStorageAccountKey != null || this.vmImpl.existingStorageAccountToAssociate != null) { return; } String accountName = this.vmImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmImpl.creatableGroup != null) { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withNewResourceGroup(this.vmImpl.creatableGroup); } else { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withExistingResourceGroup(this.vmImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(storageAccountCreatable); } void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = 
ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.vmImpl.creatableStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(this.vmImpl.creatableStorageAccountKey); } else if (this.vmImpl.existingStorageAccountToAssociate != null) { storageAccount = this.vmImpl.existingStorageAccountToAssociate; } if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmInner() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); } private VirtualMachineInner vmInner() { return this.vmImpl.innerModel(); } private void enableDisable(boolean enable) { if (this.vmInner().diagnosticsProfile() == null) { this.vmInner().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmInner().diagnosticsProfile().bootDiagnostics() == null) { this.vmInner().diagnosticsProfile().withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
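Usage aside (not part of the source above): the fluent withers implemented by VirtualMachineImpl are normally chained from the virtualMachines() entry point when defining a VM. A minimal sketch follows, assuming an already-authenticated ComputeManager named computeManager, an existing resource group "rg-demo", and an OpenSSH public key string in sshPublicKey; stage ordering and enum constants may differ slightly across SDK versions.

VirtualMachine vm = computeManager.virtualMachines()
    .define("demo-vm")
    .withRegion(Region.US_EAST)
    .withExistingResourceGroup("rg-demo")
    .withNewPrimaryNetwork("10.0.0.0/28")          // withNewPrimaryNetwork(String addressSpace)
    .withPrimaryPrivateIPAddressDynamic()          // dynamic private IP on the implicit NIC
    .withoutPrimaryPublicIPAddress()               // no public IP for this sketch
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("azureuser")
    .withSsh(sshPublicKey)                         // appended to /home/azureuser/.ssh/authorized_keys
    .withSize(VirtualMachineSizeTypes.fromString("Standard_D2s_v3"))
    .withOSDiskCaching(CachingTypes.READ_WRITE)
    .create();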
class VirtualMachineImpl extends GroupableResourceImpl<VirtualMachine, VirtualMachineInner, VirtualMachineImpl, ComputeManager> implements VirtualMachine, VirtualMachine.DefinitionManagedOrUnmanaged, VirtualMachine.DefinitionManaged, VirtualMachine.DefinitionUnmanaged, VirtualMachine.Update, VirtualMachine.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate, VirtualMachine.UpdateStages.WithSystemAssignedIdentityBasedAccessOrUpdate { private final ClientLogger logger = new ClientLogger(VirtualMachineImpl.class); private final StorageManager storageManager; private final NetworkManager networkManager; private final AuthorizationManager authorizationManager; private final String vmName; private final IdentifierProvider namer; private String creatableStorageAccountKey; private String creatableAvailabilitySetKey; private String creatablePrimaryNetworkInterfaceKey; private List<String> creatableSecondaryNetworkInterfaceKeys; private StorageAccount existingStorageAccountToAssociate; private AvailabilitySet existingAvailabilitySetToAssociate; private NetworkInterface existingPrimaryNetworkInterfaceToAssociate; private List<NetworkInterface> existingSecondaryNetworkInterfacesToAssociate; private VirtualMachineInstanceView virtualMachineInstanceView; private boolean isMarketplaceLinuxImage; private NetworkInterface.DefinitionStages.WithPrimaryPrivateIP nicDefinitionWithPrivateIp; private NetworkInterface.DefinitionStages.WithPrimaryNetworkSubnet nicDefinitionWithSubnet; private NetworkInterface.DefinitionStages.WithCreate nicDefinitionWithCreate; private VirtualMachineExtensionsImpl virtualMachineExtensions; private boolean isUnmanagedDiskSelected; private List<VirtualMachineUnmanagedDataDisk> unmanagedDataDisks; private final ManagedDataDiskCollection managedDataDisks; private final BootDiagnosticsHandler bootDiagnosticsHandler; private VirtualMachineMsiHandler virtualMachineMsiHandler; private PublicIpAddress.DefinitionStages.WithCreate implicitPipCreatable; private String newProximityPlacementGroupName; private ProximityPlacementGroupType newProximityPlacementGroupType; private boolean removeOsProfile; VirtualMachineUpdateInner updateParameterSnapshotOnUpdate; private static final SerializerAdapter SERIALIZER_ADAPTER = SerializerFactory.createDefaultManagementSerializerAdapter(); private final ObjectMapper mapper; private static final JacksonAnnotationIntrospector ANNOTATION_INTROSPECTOR = new JacksonAnnotationIntrospector() { @Override public JsonProperty.Access findPropertyAccess(Annotated annotated) { JsonProperty.Access access = super.findPropertyAccess(annotated); if (access == JsonProperty.Access.WRITE_ONLY) { return JsonProperty.Access.AUTO; } return access; } }; VirtualMachineImpl( String name, VirtualMachineInner innerModel, final ComputeManager computeManager, final StorageManager storageManager, final NetworkManager networkManager, final AuthorizationManager authorizationManager) { super(name, innerModel, computeManager); this.storageManager = storageManager; this.networkManager = networkManager; this.authorizationManager = authorizationManager; this.vmName = name; this.isMarketplaceLinuxImage = false; this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.vmName); this.creatableSecondaryNetworkInterfaceKeys = new ArrayList<>(); this.existingSecondaryNetworkInterfacesToAssociate = new ArrayList<>(); this.virtualMachineExtensions = new VirtualMachineExtensionsImpl(computeManager.serviceClient().getVirtualMachineExtensions(), this); 
this.managedDataDisks = new ManagedDataDiskCollection(this); initializeDataDisks(); this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this); this.virtualMachineMsiHandler = new VirtualMachineMsiHandler(authorizationManager, this); this.newProximityPlacementGroupName = null; this.newProximityPlacementGroupType = null; this.mapper = new ObjectMapper(); this.mapper.setAnnotationIntrospector(ANNOTATION_INTROSPECTOR); } @Override public VirtualMachineImpl update() { updateParameterSnapshotOnUpdate = this.deepCopyInnerToUpdateParameter(); return super.update(); }; @Override public Mono<VirtualMachine> refreshAsync() { return super .refreshAsync() .map( virtualMachine -> { reset(virtualMachine.innerModel()); virtualMachineExtensions.refresh(); return virtualMachine; }); } @Override protected Mono<VirtualMachineInner> getInnerAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } @Override public void deallocate() { this.deallocateAsync().block(); } @Override public Mono<Void> deallocateAsync() { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name()) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void deallocate(boolean hibernate) { this.deallocateAsync(hibernate).block(); } @Override public Mono<Void> deallocateAsync(boolean hibernate) { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name(), hibernate) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void generalize() { this.generalizeAsync().block(); } @Override public Mono<Void> generalizeAsync() { return this .manager() .serviceClient() .getVirtualMachines() .generalizeAsync(this.resourceGroupName(), this.name()); } @Override public void powerOff() { this.powerOffAsync().block(); } @Override public Mono<Void> powerOffAsync() { return this .manager() .serviceClient() .getVirtualMachines() .powerOffAsync(this.resourceGroupName(), this.name(), null); } @Override public void restart() { this.restartAsync().block(); } @Override public Mono<Void> restartAsync() { return this.manager().serviceClient().getVirtualMachines().restartAsync(this.resourceGroupName(), this.name()); } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return this.manager().serviceClient().getVirtualMachines().startAsync(this.resourceGroupName(), this.name()); } @Override public void redeploy() { this.redeployAsync().block(); } @Override public Mono<Void> redeployAsync() { return this.manager().serviceClient().getVirtualMachines().redeployAsync(this.resourceGroupName(), this.name()); } @Override public void simulateEviction() { this.simulateEvictionAsync().block(); } @Override public Mono<Void> simulateEvictionAsync() { return this .manager() .serviceClient() .getVirtualMachines() .simulateEvictionAsync(this.resourceGroupName(), this.name()); } @Override public void convertToManaged() { this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisks(this.resourceGroupName(), this.name()); this.refresh(); } @Override public Mono<Void> convertToManagedAsync() { return this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisksAsync(this.resourceGroupName(), this.name()) .flatMap(aVoid -> refreshAsync()) .then(); } @Override public VirtualMachineEncryption diskEncryption() { return new VirtualMachineEncryptionImpl(this); } @Override public 
PagedIterable<VirtualMachineSize> availableSizes() { return PagedConverter.mapPage(this .manager() .serviceClient() .getVirtualMachines() .listAvailableSizes(this.resourceGroupName(), this.name()), VirtualMachineSizeImpl::new); } @Override public String capture(String containerName, String vhdPrefix, boolean overwriteVhd) { return this.captureAsync(containerName, vhdPrefix, overwriteVhd).block(); } @Override public Mono<String> captureAsync(String containerName, String vhdPrefix, boolean overwriteVhd) { VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters(); parameters.withDestinationContainerName(containerName); parameters.withOverwriteVhds(overwriteVhd); parameters.withVhdPrefix(vhdPrefix); return this .manager() .serviceClient() .getVirtualMachines() .captureAsync(this.resourceGroupName(), this.name(), parameters) .map( captureResultInner -> { try { return mapper.writeValueAsString(captureResultInner); } catch (JsonProcessingException ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } }); } @Override public VirtualMachineInstanceView refreshInstanceView() { return refreshInstanceViewAsync().block(); } @Override public Mono<VirtualMachineInstanceView> refreshInstanceViewAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupAsync(this.resourceGroupName(), this.name(), InstanceViewTypes.INSTANCE_VIEW) .map( inner -> { virtualMachineInstanceView = new VirtualMachineInstanceViewImpl(inner.instanceView()); return virtualMachineInstanceView; }) .switchIfEmpty( Mono .defer( () -> { virtualMachineInstanceView = null; return Mono.empty(); })); } @Override public RunCommandResult runPowerShellScript( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runPowerShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runShellScript(List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runCommand(RunCommandInput inputCommand) { return this.manager().virtualMachines().runCommand(this.resourceGroupName(), this.name(), inputCommand); } @Override public Mono<RunCommandResult> runCommandAsync(RunCommandInput inputCommand) { return this.manager().virtualMachines().runCommandAsync(this.resourceGroupName(), this.name(), inputCommand); } @Override public VirtualMachineImpl withNewPrimaryNetwork(Creatable<Network> creatable) { this.nicDefinitionWithPrivateIp = this.preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)).withNewPrimaryNetwork(creatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetwork(String addressSpace) { this.nicDefinitionWithPrivateIp = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) 
.withNewPrimaryNetwork(addressSpace); return this; } @Override public VirtualMachineImpl withExistingPrimaryNetwork(Network network) { this.nicDefinitionWithSubnet = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) .withExistingPrimaryNetwork(network); return this; } @Override public VirtualMachineImpl withSubnet(String name) { this.nicDefinitionWithPrivateIp = this.nicDefinitionWithSubnet.withSubnet(name); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressDynamic() { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressDynamic(); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressStatic(String staticPrivateIPAddress) { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressStatic(staticPrivateIPAddress); return this; } @Override public VirtualMachineImpl withNewPrimaryPublicIPAddress(Creatable<PublicIpAddress> creatable) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(creatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel) { PublicIpAddress.DefinitionStages.WithGroup definitionWithGroup = this .networkManager .publicIpAddresses() .define(this.namer.getRandomName("pip", 15)) .withRegion(this.regionName()); PublicIpAddress.DefinitionStages.WithCreate definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } this.implicitPipCreatable = definitionAfterGroup.withLeafDomainLabel(leafDnsLabel); Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(this.implicitPipCreatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withExistingPrimaryPublicIPAddress(PublicIpAddress publicIPAddress) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withExistingPrimaryPublicIPAddress(publicIPAddress); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withoutPrimaryPublicIPAddress() { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate; this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetworkInterface(Creatable<NetworkInterface> creatable) { this.creatablePrimaryNetworkInterfaceKey = this.addDependency(creatable); return this; } public VirtualMachineImpl withNewPrimaryNetworkInterface(String name, String publicDnsNameLabel) { Creatable<NetworkInterface> definitionCreatable = prepareNetworkInterface(name).withNewPrimaryPublicIPAddress(publicDnsNameLabel); return withNewPrimaryNetworkInterface(definitionCreatable); } @Override public VirtualMachineImpl withExistingPrimaryNetworkInterface(NetworkInterface networkInterface) { this.existingPrimaryNetworkInterfaceToAssociate = networkInterface; return this; } @Override public VirtualMachineImpl withStoredWindowsImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); 
this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withStoredLinuxImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); return this; } @Override public VirtualMachineImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) { return withSpecificWindowsImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) { return withSpecificLinuxImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withSpecificWindowsImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecificLinuxImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withLatestWindowsImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificWindowsImageVersion(imageReference); } @Override public VirtualMachineImpl withLatestLinuxImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificLinuxImageVersion(imageReference); } @Override public VirtualMachineImpl withGeneralizedWindowsCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); 
this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecializedWindowsCustomImage(String customImageId) { this.withGeneralizedWindowsCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withGeneralizedLinuxCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withSpecializedLinuxCustomImage(String customImageId) { this.withGeneralizedLinuxCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedOSUnmanagedDisk(String osDiskUrl, OperatingSystemTypes osType) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(osDiskUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); this.innerModel().storageProfile().osDisk().withVhd(osVhd); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withManagedDisk(null); return this; } @Override public VirtualMachineImpl withSpecializedOSDisk(Disk disk, OperatingSystemTypes osType) { ManagedDiskParameters diskParametersInner = new ManagedDiskParameters(); diskParametersInner.withId(disk.id()); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); this.innerModel().storageProfile().osDisk().withManagedDisk(diskParametersInner); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withVhd(null); return this; } @Override public VirtualMachineImpl withRootUsername(String rootUserName) { this.innerModel().osProfile().withAdminUsername(rootUserName); return this; } @Override public VirtualMachineImpl withAdminUsername(String adminUserName) { this.innerModel().osProfile().withAdminUsername(adminUserName); return this; } @Override public VirtualMachineImpl withSsh(String publicKeyData) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration().ssh() == null) { SshConfiguration sshConfiguration = new SshConfiguration(); sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>()); osProfile.linuxConfiguration().withSsh(sshConfiguration); } SshPublicKey sshPublicKey = new SshPublicKey(); sshPublicKey.withKeyData(publicKeyData); sshPublicKey.withPath("/home/" + osProfile.adminUsername() + 
"/.ssh/authorized_keys"); osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey); return this; } @Override public VirtualMachineImpl withoutVMAgent() { this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(false); return this; } @Override public VirtualMachineImpl withoutAutoUpdate() { this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false); return this; } @Override public VirtualMachineImpl withTimeZone(String timeZone) { this.innerModel().osProfile().windowsConfiguration().withTimeZone(timeZone); return this; } @Override public VirtualMachineImpl withWinRM(WinRMListener listener) { if (this.innerModel().osProfile().windowsConfiguration().winRM() == null) { WinRMConfiguration winRMConfiguration = new WinRMConfiguration(); this.innerModel().osProfile().windowsConfiguration().withWinRM(winRMConfiguration); } this.innerModel().osProfile().windowsConfiguration().winRM().listeners().add(listener); return this; } @Override public VirtualMachineImpl withRootPassword(String password) { this.innerModel().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineImpl withAdminPassword(String password) { this.innerModel().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineImpl withCustomData(String base64EncodedCustomData) { this.innerModel().osProfile().withCustomData(base64EncodedCustomData); return this; } @Override public VirtualMachineImpl withComputerName(String computerName) { this.innerModel().osProfile().withComputerName(computerName); return this; } @Override public VirtualMachineImpl withSize(String sizeName) { this.innerModel().hardwareProfile().withVmSize(VirtualMachineSizeTypes.fromString(sizeName)); return this; } @Override public VirtualMachineImpl withSize(VirtualMachineSizeTypes size) { this.innerModel().hardwareProfile().withVmSize(size); return this; } @Override public VirtualMachineImpl withOSDiskCaching(CachingTypes cachingType) { this.innerModel().storageProfile().osDisk().withCaching(cachingType); return this; } @Override public VirtualMachineImpl withOSDiskVhdLocation(String containerName, String vhdName) { if (isManagedDiskEnabled()) { return this; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!this.isOSDiskFromImage(osDisk)) { return this; } if (this.isOsDiskFromCustomImage(storageProfile)) { return this; } if (this.isOSDiskFromPlatformImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(temporaryBlobUrl(containerName, vhdName)); this.innerModel().storageProfile().osDisk().withVhd(osVhd); return this; } if (this.isOSDiskFromStoredImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); try { URL sourceCustomImageUrl = new URL(osDisk.image().uri()); URL destinationVhdUrl = new URL( sourceCustomImageUrl.getProtocol(), sourceCustomImageUrl.getHost(), "/" + containerName + "/" + vhdName); osVhd.withUri(destinationVhdUrl.toString()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new RuntimeException(ex)); } this.innerModel().storageProfile().osDisk().withVhd(osVhd); } return this; } @Override public VirtualMachineImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) { if (this.innerModel().storageProfile().osDisk().managedDisk() == null) { this.innerModel().storageProfile().osDisk().withManagedDisk(new ManagedDiskParameters()); } 
this.innerModel().storageProfile().osDisk().managedDisk().withStorageAccountType(accountType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultCachingType(CachingTypes cachingType) { this.managedDataDisks.setDefaultCachingType(cachingType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) { this.managedDataDisks.setDefaultStorageAccountType(storageAccountType); return this; } @Override public VirtualMachineImpl withOSDiskEncryptionSettings(DiskEncryptionSettings settings) { this.innerModel().storageProfile().osDisk().withEncryptionSettings(settings); return this; } @Override public VirtualMachineImpl withOSDiskSizeInGB(int size) { this.innerModel().storageProfile().osDisk().withDiskSizeGB(size); return this; } @Override public VirtualMachineImpl withOSDiskName(String name) { this.innerModel().storageProfile().osDisk().withName(name); return this; } @Override public UnmanagedDataDiskImpl defineUnmanagedDataDisk(String name) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return UnmanagedDataDiskImpl.prepareDataDisk(name, this); } @Override public VirtualMachineImpl withNewUnmanagedDataDisk(Integer sizeInGB) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return defineUnmanagedDataDisk(null).withNewVhd(sizeInGB).attach(); } @Override public VirtualMachineImpl withExistingUnmanagedDataDisk( String storageAccountName, String containerName, String vhdName) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return defineUnmanagedDataDisk(null).withExistingVhd(storageAccountName, containerName, vhdName).attach(); } @Override public VirtualMachineImpl withoutUnmanagedDataDisk(String name) { int idx = -1; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { idx++; if (dataDisk.name().equalsIgnoreCase(name)) { this.unmanagedDataDisks.remove(idx); this.innerModel().storageProfile().dataDisks().remove(idx); break; } } return this; } @Override public VirtualMachineImpl withoutUnmanagedDataDisk(int lun) { int idx = -1; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { idx++; if (dataDisk.lun() == lun) { this.unmanagedDataDisks.remove(idx); this.innerModel().storageProfile().dataDisks().remove(idx); break; } } return this; } @Override public UnmanagedDataDiskImpl updateUnmanagedDataDisk(String name) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_NO_UNMANAGED_DISK_TO_UPDATE); for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.name().equalsIgnoreCase(name)) { return (UnmanagedDataDiskImpl) dataDisk; } } throw logger.logExceptionAsError(new RuntimeException("A data disk with name '" + name + "' not found")); } @Override public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this.managedDataDisks.newDisksToAttach.put(this.addDependency(creatable), new DataDisk().withLun(-1)); return this; } @Override public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .newDisksToAttach .put(this.addDependency(creatable), new DataDisk().withLun(lun).withCaching(cachingType)); 
return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this.managedDataDisks.implicitDisksToAssociate.add(new DataDisk().withLun(-1).withDiskSizeGB(sizeInGB)); return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .implicitDisksToAssociate .add(new DataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDisk( int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(lun) .withDiskSizeGB(sizeInGB) .withCaching(cachingType) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add(new DataDisk().withLun(-1).withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add(new DataDisk().withLun(lun).withManagedDisk(managedDiskParameters).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk, int newSizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add( new DataDisk() .withLun(lun) .withDiskSizeGB(newSizeInGB) .withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage(int imageLun) { this.managedDataDisks.newDisksFromImage.add(new DataDisk().withLun(imageLun)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage(int imageLun, int newSizeInGB, CachingTypes cachingType) { this .managedDataDisks .newDisksFromImage .add(new DataDisk().withLun(imageLun).withDiskSizeGB(newSizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) { ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .newDisksFromImage .add( new DataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) 
.withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withoutDataDisk(int lun) { if (!isManagedDiskEnabled()) { return this; } this.managedDataDisks.diskLunsToRemove.add(lun); return this; } @Override public VirtualMachineImpl withNewStorageAccount(Creatable<StorageAccount> creatable) { if (this.creatableStorageAccountKey == null) { this.creatableStorageAccountKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withNewStorageAccount(String name) { StorageAccount.DefinitionStages.WithGroup definitionWithGroup = this.storageManager.storageAccounts().define(name).withRegion(this.regionName()); Creatable<StorageAccount> definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return withNewStorageAccount(definitionAfterGroup); } @Override public VirtualMachineImpl withExistingStorageAccount(StorageAccount storageAccount) { this.existingStorageAccountToAssociate = storageAccount; return this; } @Override public VirtualMachineImpl withNewAvailabilitySet(Creatable<AvailabilitySet> creatable) { if (this.creatableAvailabilitySetKey == null) { this.creatableAvailabilitySetKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withProximityPlacementGroup(String proximityPlacementGroupId) { this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId)); newProximityPlacementGroupName = null; return this; } @Override public VirtualMachineImpl withNewProximityPlacementGroup( String proximityPlacementGroupName, ProximityPlacementGroupType type) { this.newProximityPlacementGroupName = proximityPlacementGroupName; this.newProximityPlacementGroupType = type; this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withoutProximityPlacementGroup() { this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withNewAvailabilitySet(String name) { AvailabilitySet.DefinitionStages.WithGroup definitionWithGroup = super.myManager.availabilitySets().define(name).withRegion(this.regionName()); AvailabilitySet.DefinitionStages.WithSku definitionWithSku; if (this.creatableGroup != null) { definitionWithSku = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithSku = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } Creatable<AvailabilitySet> creatable; if (isManagedDiskEnabled()) { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.ALIGNED); } else { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.CLASSIC); } return withNewAvailabilitySet(creatable); } @Override public VirtualMachineImpl withExistingAvailabilitySet(AvailabilitySet availabilitySet) { this.existingAvailabilitySetToAssociate = availabilitySet; return this; } @Override public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable) { this.creatableSecondaryNetworkInterfaceKeys.add(this.addDependency(creatable)); return this; } @Override public VirtualMachineImpl withExistingSecondaryNetworkInterface(NetworkInterface networkInterface) { this.existingSecondaryNetworkInterfacesToAssociate.add(networkInterface); return this; } @Override public VirtualMachineExtensionImpl 
defineNewExtension(String name) { return this.virtualMachineExtensions.define(name); } @Override public VirtualMachineImpl withoutSecondaryNetworkInterface(String name) { if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { int idx = -1; for (NetworkInterfaceReference nicReference : this.innerModel().networkProfile().networkInterfaces()) { idx++; if (!nicReference.primary() && name.equalsIgnoreCase(ResourceUtils.nameFromResourceId(nicReference.id()))) { this.innerModel().networkProfile().networkInterfaces().remove(idx); break; } } } return this; } @Override public VirtualMachineExtensionImpl updateExtension(String name) { return this.virtualMachineExtensions.update(name); } @Override public VirtualMachineImpl withoutExtension(String name) { this.virtualMachineExtensions.remove(name); return this; } @Override public VirtualMachineImpl withPlan(PurchasePlan plan) { this.innerModel().withPlan(new Plan()); this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name()); return this; } @Override public VirtualMachineImpl withPromotionalPlan(PurchasePlan plan, String promotionCode) { this.withPlan(plan); this.innerModel().plan().withPromotionCode(promotionCode); return this; } @Override public VirtualMachineImpl withUnmanagedDisks() { this.isUnmanagedDiskSelected = true; return this; } @Override public VirtualMachineImpl withBootDiagnosticsOnManagedStorageAccount() { this.bootDiagnosticsHandler.withBootDiagnostics(true); return this; } @Override public VirtualMachineImpl withBootDiagnostics() { this.bootDiagnosticsHandler.withBootDiagnostics(false); return this; } @Override public VirtualMachineImpl withBootDiagnostics(Creatable<StorageAccount> creatable) { this.bootDiagnosticsHandler.withBootDiagnostics(creatable); return this; } @Override public VirtualMachineImpl withBootDiagnostics(String storageAccountBlobEndpointUri) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri); return this; } @Override public VirtualMachineImpl withBootDiagnostics(StorageAccount storageAccount) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount); return this; } @Override public VirtualMachineImpl withoutBootDiagnostics() { this.bootDiagnosticsHandler.withoutBootDiagnostics(); return this; } @Override public VirtualMachineImpl withPriority(VirtualMachinePriorityTypes priority) { this.innerModel().withPriority(priority); return this; } @Override public VirtualMachineImpl withLowPriority() { this.withPriority(VirtualMachinePriorityTypes.LOW); return this; } @Override public VirtualMachineImpl withLowPriority(VirtualMachineEvictionPolicyTypes policy) { this.withLowPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withSpotPriority() { this.withPriority(VirtualMachinePriorityTypes.SPOT); return this; } @Override public VirtualMachineImpl withSpotPriority(VirtualMachineEvictionPolicyTypes policy) { this.withSpotPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withMaxPrice(Double maxPrice) { this.innerModel().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice)); return this; } @Override public VirtualMachineImpl withSystemAssignedManagedServiceIdentity() { this.virtualMachineMsiHandler.withLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withoutSystemAssignedManagedServiceIdentity() { 
this.virtualMachineMsiHandler.withoutLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) { this.virtualMachineMsiHandler.withAccessTo(resourceId, role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole role) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessTo(resourceId, roleDefinitionId); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId); return this; } @Override public VirtualMachineImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) { this.virtualMachineMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity); return this; } @Override public VirtualMachineImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) { this.virtualMachineMsiHandler.withExistingExternalManagedServiceIdentity(identity); return this; } @Override public VirtualMachineImpl withoutUserAssignedManagedServiceIdentity(String identityId) { this.virtualMachineMsiHandler.withoutExternalManagedServiceIdentity(identityId); return this; } @Override public VirtualMachineImpl withLicenseType(String licenseType) { innerModel().withLicenseType(licenseType); return this; } @Override public VirtualMachineImpl enableHibernation() { if (this.innerModel().additionalCapabilities() == null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); } this.innerModel().additionalCapabilities().withHibernationEnabled(true); return this; } @Override public VirtualMachineImpl disableHibernation() { if (this.innerModel().additionalCapabilities() == null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); } this.innerModel().additionalCapabilities().withHibernationEnabled(false); return this; } @Override public boolean isManagedDiskEnabled() { if (isOsDiskFromCustomImage(this.innerModel().storageProfile())) { return true; } if (isOSDiskAttachedManaged(this.innerModel().storageProfile().osDisk())) { return true; } if (isOSDiskFromStoredImage(this.innerModel().storageProfile())) { return false; } if (isOSDiskAttachedUnmanaged(this.innerModel().storageProfile().osDisk())) { return false; } if (isOSDiskFromPlatformImage(this.innerModel().storageProfile())) { if (this.isUnmanagedDiskSelected) { return false; } } if (isInCreateMode()) { return true; } else { return this.innerModel().storageProfile().osDisk().vhd() == null; } } @Override public String computerName() { if (innerModel().osProfile() == null) { return null; } return innerModel().osProfile().computerName(); } @Override public VirtualMachineSizeTypes size() { return innerModel().hardwareProfile().vmSize(); } @Override public OperatingSystemTypes osType() { if (innerModel().storageProfile().osDisk().osType() != null) { return innerModel().storageProfile().osDisk().osType(); } if (innerModel().osProfile() != null) { if (innerModel().osProfile().linuxConfiguration() != null) { return OperatingSystemTypes.LINUX; } if (innerModel().osProfile().windowsConfiguration() != null) { return OperatingSystemTypes.WINDOWS; } } return null; } @Override public 
String osUnmanagedDiskVhdUri() { if (isManagedDiskEnabled() || this.storageProfile().osDisk().vhd() == null) { return null; } return innerModel().storageProfile().osDisk().vhd().uri(); } @Override public CachingTypes osDiskCachingType() { return innerModel().storageProfile().osDisk().caching(); } @Override public int osDiskSize() { return ResourceManagerUtils.toPrimitiveInt(innerModel().storageProfile().osDisk().diskSizeGB()); } @Override public StorageAccountTypes osDiskStorageAccountType() { if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) { return null; } return this.storageProfile().osDisk().managedDisk().storageAccountType(); } @Override public String osDiskId() { if (!isManagedDiskEnabled()) { return null; } return this.storageProfile().osDisk().managedDisk().id(); } @Override public Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks() { Map<Integer, VirtualMachineUnmanagedDataDisk> dataDisks = new HashMap<>(); if (!isManagedDiskEnabled()) { for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { dataDisks.put(dataDisk.lun(), dataDisk); } } return Collections.unmodifiableMap(dataDisks); } @Override public Map<Integer, VirtualMachineDataDisk> dataDisks() { Map<Integer, VirtualMachineDataDisk> dataDisks = new HashMap<>(); if (isManagedDiskEnabled()) { List<DataDisk> innerDataDisks = this.innerModel().storageProfile().dataDisks(); if (innerDataDisks != null) { for (DataDisk innerDataDisk : innerDataDisks) { dataDisks.put(innerDataDisk.lun(), new VirtualMachineDataDiskImpl(innerDataDisk)); } } } return Collections.unmodifiableMap(dataDisks); } @Override public NetworkInterface getPrimaryNetworkInterface() { return this.getPrimaryNetworkInterfaceAsync().block(); } @Override public Mono<NetworkInterface> getPrimaryNetworkInterfaceAsync() { return this.networkManager.networkInterfaces().getByIdAsync(primaryNetworkInterfaceId()); } @Override public PublicIpAddress getPrimaryPublicIPAddress() { return this.getPrimaryNetworkInterface().primaryIPConfiguration().getPublicIpAddress(); } @Override public String getPrimaryPublicIPAddressId() { return this.getPrimaryNetworkInterface().primaryIPConfiguration().publicIpAddressId(); } @Override public List<String> networkInterfaceIds() { List<String> nicIds = new ArrayList<>(); for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) { nicIds.add(nicRef.id()); } return nicIds; } @Override public String primaryNetworkInterfaceId() { final List<NetworkInterfaceReference> nicRefs = this.innerModel().networkProfile().networkInterfaces(); String primaryNicRefId = null; if (nicRefs.size() == 1) { primaryNicRefId = nicRefs.get(0).id(); } else if (nicRefs.size() == 0) { primaryNicRefId = null; } else { for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) { if (nicRef.primary() != null && nicRef.primary()) { primaryNicRefId = nicRef.id(); break; } } if (primaryNicRefId == null) { primaryNicRefId = nicRefs.get(0).id(); } } return primaryNicRefId; } @Override public String availabilitySetId() { if (innerModel().availabilitySet() != null) { return innerModel().availabilitySet().id(); } return null; } @Override public String provisioningState() { return innerModel().provisioningState(); } @Override public String licenseType() { return innerModel().licenseType(); } @Override public ProximityPlacementGroup proximityPlacementGroup() { if (innerModel().proximityPlacementGroup() == null) { return null; } else { ResourceId id = 
ResourceId.fromString(innerModel().proximityPlacementGroup().id()); ProximityPlacementGroupInner plgInner = manager() .serviceClient() .getProximityPlacementGroups() .getByResourceGroup(id.resourceGroupName(), id.name()); if (plgInner == null) { return null; } else { return new ProximityPlacementGroupImpl(plgInner); } } } @Override public Mono<List<VirtualMachineExtension>> listExtensionsAsync() { return this.virtualMachineExtensions.listAsync(); } @Override public Map<String, VirtualMachineExtension> listExtensions() { return this.virtualMachineExtensions.asMap(); } @Override public Plan plan() { return innerModel().plan(); } @Override public StorageProfile storageProfile() { return innerModel().storageProfile(); } @Override public OSProfile osProfile() { return innerModel().osProfile(); } @Override public DiagnosticsProfile diagnosticsProfile() { return innerModel().diagnosticsProfile(); } @Override public String vmId() { return innerModel().vmId(); } @Override public VirtualMachineInstanceView instanceView() { if (this.virtualMachineInstanceView == null) { this.refreshInstanceView(); } return this.virtualMachineInstanceView; } @Override public Set<AvailabilityZoneId> availabilityZones() { Set<AvailabilityZoneId> zones = new HashSet<>(); if (this.innerModel().zones() != null) { for (String zone : this.innerModel().zones()) { zones.add(AvailabilityZoneId.fromString(zone)); } } return Collections.unmodifiableSet(zones); } @Override public PowerState powerState() { return PowerState.fromInstanceView(this.instanceView()); } @Override public boolean isBootDiagnosticsEnabled() { return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled(); } @Override public String bootDiagnosticsStorageUri() { return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri(); } @Override public boolean isManagedServiceIdentityEnabled() { ResourceIdentityType type = this.managedServiceIdentityType(); return type != null && !type.equals(ResourceIdentityType.NONE); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().tenantId(); } return null; } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().principalId(); } return null; } @Override public ResourceIdentityType managedServiceIdentityType() { if (this.innerModel().identity() != null) { return this.innerModel().identity().type(); } return null; } @Override public Set<String> userAssignedManagedServiceIdentityIds() { if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) { return Collections .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet())); } return Collections.unmodifiableSet(new HashSet<String>()); } @Override public BillingProfile billingProfile() { return this.innerModel().billingProfile(); } @Override public boolean isHibernationEnabled() { return this.innerModel().additionalCapabilities() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().additionalCapabilities().hibernationEnabled()); } @Override public VirtualMachinePriorityTypes priority() { return this.innerModel().priority(); } @Override public VirtualMachineEvictionPolicyTypes evictionPolicy() { return this.innerModel().evictionPolicy(); } @Override public void beforeGroupCreateOrUpdate() { if (creatableStorageAccountKey == null && existingStorageAccountToAssociate == null) { if 
(osDiskRequiresImplicitStorageAccountCreation() || dataDisksRequiresImplicitStorageAccountCreation()) { Creatable<StorageAccount> storageAccountCreatable = null; if (this.creatableGroup != null) { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withNewResourceGroup(this.creatableGroup); } else { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withExistingResourceGroup(this.resourceGroupName()); } this.creatableStorageAccountKey = this.addDependency(storageAccountCreatable); } } this.bootDiagnosticsHandler.prepare(); } @Override public Mono<VirtualMachine> createResourceAsync() { return prepareCreateResourceAsync() .flatMap( virtualMachine -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateAsync(resourceGroupName(), vmName, innerModel()) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; })); } private Mono<VirtualMachine> prepareCreateResourceAsync() { setOSDiskDefaults(); setOSProfileDefaults(); setHardwareProfileDefaults(); if (isManagedDiskEnabled()) { managedDataDisks.setDataDisksDefaults(); } else { UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName); } this.handleUnManagedOSAndDataDisksStorageSettings(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.handleNetworkSettings(); return this .createNewProximityPlacementGroupAsync() .map( virtualMachine -> { this.handleAvailabilitySettings(); this.virtualMachineMsiHandler.processCreatedExternalIdentities(); this.virtualMachineMsiHandler.handleExternalIdentities(); return virtualMachine; }); } public Accepted<VirtualMachine> beginCreate() { return AcceptedImpl .<VirtualMachine, VirtualMachineInner>newAccepted( logger, this.manager().serviceClient().getHttpPipeline(), this.manager().serviceClient().getDefaultPollInterval(), () -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel()) .block(), inner -> new VirtualMachineImpl( inner.name(), inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager), VirtualMachineInner.class, () -> { Flux<Indexable> dependencyTasksAsync = taskGroup().invokeDependencyAsync(taskGroup().newInvocationContext()); dependencyTasksAsync.blockLast(); prepareCreateResourceAsync().block(); }, this::reset, Context.NONE); } @Override public Mono<VirtualMachine> updateResourceAsync() { if (isManagedDiskEnabled()) { managedDataDisks.setDataDisksDefaults(); } else { UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName); } this.handleUnManagedOSAndDataDisksStorageSettings(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.handleNetworkSettings(); this.handleAvailabilitySettings(); this.virtualMachineMsiHandler.processCreatedExternalIdentities(); VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner(); this.copyInnerToUpdateParameter(updateParameter); this.virtualMachineMsiHandler.handleExternalIdentities(updateParameter); final boolean vmModified = this.isVirtualMachineModifiedDuringUpdate(updateParameter); if (vmModified) { return this .manager() .serviceClient() .getVirtualMachines() .updateAsync(resourceGroupName(), vmName, updateParameter) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; }); } else { return Mono.just(this); } } 
@Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { this.virtualMachineExtensions.clear(); if (isGroupFaulted) { return Mono.empty(); } else { return this.refreshAsync().then(); } } VirtualMachineImpl withExtension(VirtualMachineExtensionImpl extension) { this.virtualMachineExtensions.addExtension(extension); return this; } private void reset(VirtualMachineInner inner) { this.setInner(inner); clearCachedRelatedResources(); initializeDataDisks(); virtualMachineMsiHandler.clear(); } VirtualMachineImpl withUnmanagedDataDisk(UnmanagedDataDiskImpl dataDisk) { this.innerModel().storageProfile().dataDisks().add(dataDisk.innerModel()); this.unmanagedDataDisks.add(dataDisk); return this; } @Override public VirtualMachineImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (isInCreateMode()) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<String>()); } this.innerModel().zones().add(zoneId.toString()); if (this.implicitPipCreatable != null) { this.implicitPipCreatable.withAvailabilityZone(zoneId); } } return this; } AzureEnvironment environment() { return manager().environment(); } private void setOSDiskDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (isOSDiskFromImage(osDisk)) { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() == null) { osDisk.withManagedDisk(new ManagedDiskParameters()); } if (osDisk.managedDisk().storageAccountType() == null) { osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS); } osDisk.withVhd(null); } else { if (isOSDiskFromPlatformImage(storageProfile) || isOSDiskFromStoredImage(storageProfile)) { if (osDisk.vhd() == null) { String osDiskVhdContainerName = "vhds"; String osDiskVhdName = this.vmName + "-os-disk-" + UUID.randomUUID().toString() + ".vhd"; withOSDiskVhdLocation(osDiskVhdContainerName, osDiskVhdName); } osDisk.withManagedDisk(null); } if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } else { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() != null) { osDisk.managedDisk().withStorageAccountType(null); } osDisk.withVhd(null); } else { osDisk.withManagedDisk(null); if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } if (osDisk.caching() == null) { withOSDiskCaching(CachingTypes.READ_WRITE); } } private void setOSProfileDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!removeOsProfile && isOSDiskFromImage(osDisk)) { if (osDisk.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration() == null) { osProfile.withLinuxConfiguration(new LinuxConfiguration()); } this .innerModel() .osProfile() .linuxConfiguration() .withDisablePasswordAuthentication(osProfile.adminPassword() == null); } if (this.innerModel().osProfile().computerName() == null) { if (vmName.matches("[0-9]+")) { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } else if (vmName.length() <= 15) { this.innerModel().osProfile().withComputerName(vmName); } else { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } } } else { this.innerModel().withOsProfile(null); } } private void setHardwareProfileDefaults() { if (!isInCreateMode()) { return; } HardwareProfile hardwareProfile = 
this.innerModel().hardwareProfile(); if (hardwareProfile.vmSize() == null) { hardwareProfile.withVmSize(VirtualMachineSizeTypes.BASIC_A0); } } /** Prepare virtual machine disks profile (StorageProfile). */ private void handleUnManagedOSAndDataDisksStorageSettings() { if (isManagedDiskEnabled()) { return; } StorageAccount storageAccount = null; if (this.creatableStorageAccountKey != null) { storageAccount = this.taskResult(this.creatableStorageAccountKey); } else if (this.existingStorageAccountToAssociate != null) { storageAccount = this.existingStorageAccountToAssociate; } if (isInCreateMode()) { if (storageAccount != null) { if (isOSDiskFromPlatformImage(innerModel().storageProfile())) { String uri = innerModel() .storageProfile() .osDisk() .vhd() .uri() .replaceFirst("\\{storage-base-url}", storageAccount.endPoints().primary().blob()); innerModel().storageProfile().osDisk().vhd().withUri(uri); } UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } } else { if (storageAccount != null) { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } else { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, vmName); } } } private Mono<VirtualMachineImpl> createNewProximityPlacementGroupAsync() { if (isInCreateMode()) { if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) { ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner(); plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType); plgInner.withLocation(this.innerModel().location()); return this .manager() .serviceClient() .getProximityPlacementGroups() .createOrUpdateAsync(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner) .map( createdPlgInner -> { this .innerModel() .withProximityPlacementGroup(new SubResource().withId(createdPlgInner.id())); return this; }); } } return Mono.just(this); } private void handleNetworkSettings() { if (isInCreateMode()) { NetworkInterface primaryNetworkInterface = null; if (this.creatablePrimaryNetworkInterfaceKey != null) { primaryNetworkInterface = this.taskResult(this.creatablePrimaryNetworkInterfaceKey); } else if (this.existingPrimaryNetworkInterfaceToAssociate != null) { primaryNetworkInterface = this.existingPrimaryNetworkInterfaceToAssociate; } if (primaryNetworkInterface != null) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(true); nicReference.withId(primaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } for (String creatableSecondaryNetworkInterfaceKey : this.creatableSecondaryNetworkInterfaceKeys) { NetworkInterface secondaryNetworkInterface = this.taskResult(creatableSecondaryNetworkInterfaceKey); NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } for (NetworkInterface secondaryNetworkInterface : this.existingSecondaryNetworkInterfacesToAssociate) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } private void handleAvailabilitySettings() { if (!isInCreateMode()) { return; } AvailabilitySet availabilitySet = null; if 
(this.creatableAvailabilitySetKey != null) { availabilitySet = this.taskResult(this.creatableAvailabilitySetKey); } else if (this.existingAvailabilitySetToAssociate != null) { availabilitySet = this.existingAvailabilitySetToAssociate; } if (availabilitySet != null) { if (this.innerModel().availabilitySet() == null) { this.innerModel().withAvailabilitySet(new SubResource()); } this.innerModel().availabilitySet().withId(availabilitySet.id()); } } private boolean osDiskRequiresImplicitStorageAccountCreation() { if (isManagedDiskEnabled()) { return false; } if (this.creatableStorageAccountKey != null || this.existingStorageAccountToAssociate != null || !isInCreateMode()) { return false; } return isOSDiskFromPlatformImage(this.innerModel().storageProfile()); } private boolean dataDisksRequiresImplicitStorageAccountCreation() { if (isManagedDiskEnabled()) { return false; } if (this.creatableStorageAccountKey != null || this.existingStorageAccountToAssociate != null || this.unmanagedDataDisks.size() == 0) { return false; } boolean hasEmptyVhd = false; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.creationMethod() == DiskCreateOptionTypes.EMPTY || dataDisk.creationMethod() == DiskCreateOptionTypes.FROM_IMAGE) { if (dataDisk.innerModel().vhd() == null) { hasEmptyVhd = true; break; } } } if (isInCreateMode()) { return hasEmptyVhd; } if (hasEmptyVhd) { for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.creationMethod() == DiskCreateOptionTypes.ATTACH && dataDisk.innerModel().vhd() != null) { return false; } } return true; } return false; } /** * Checks whether the OS disk is directly attached to a unmanaged VHD. * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a unmanaged VHD, false otherwise */ private boolean isOSDiskAttachedUnmanaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.vhd() != null && osDisk.vhd().uri() != null; } /** * Checks whether the OS disk is directly attached to a managed disk. * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a managed disk, false otherwise */ private boolean isOSDiskAttachedManaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.managedDisk() != null && osDisk.managedDisk().id() != null; } /** * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]). * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is configured to use image from PIR or custom image */ private boolean isOSDiskFromImage(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE; } /** * Checks whether the OS disk is based on an platform image (image in PIR). * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on platform image. */ private boolean isOSDiskFromPlatformImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.publisher() != null && imageReference.offer() != null && imageReference.sku() != null && imageReference.version() != null; } /** * Checks whether the OS disk is based on a CustomImage. * * <p>A custom image is represented by {@link VirtualMachineCustomImage}. 
* * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on custom image. */ private boolean isOsDiskFromCustomImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null; } /** * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature'). * * <p>A stored image is created by calling {@link VirtualMachine * * @param storageProfile the storage profile * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature') */ private boolean isOSDiskFromStoredImage(StorageProfile storageProfile) { OSDisk osDisk = storageProfile.osDisk(); return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null; } private String temporaryBlobUrl(String containerName, String blobName) { return "{storage-base-url}" + containerName + "/" + blobName; } private NetworkInterface.DefinitionStages.WithPrimaryPublicIPAddress prepareNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionWithNetwork; if (this.creatableGroup != null) { definitionWithNetwork = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithNetwork = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionWithNetwork.withNewPrimaryNetwork("vnet" + name).withPrimaryPrivateIPAddressDynamic(); } private void initializeDataDisks() { if (this.innerModel().storageProfile().dataDisks() == null) { this.innerModel().storageProfile().withDataDisks(new ArrayList<>()); } this.isUnmanagedDiskSelected = false; this.managedDataDisks.clear(); this.unmanagedDataDisks = new ArrayList<>(); if (!isManagedDiskEnabled()) { for (DataDisk dataDiskInner : this.storageProfile().dataDisks()) { this.unmanagedDataDisks.add(new UnmanagedDataDiskImpl(dataDiskInner, this)); } } } private NetworkInterface.DefinitionStages.WithPrimaryNetwork preparePrimaryNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionAfterGroup; } private void clearCachedRelatedResources() { this.virtualMachineInstanceView = null; } private void throwIfManagedDiskEnabled(String message) { if (this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private void throwIfManagedDiskDisabled(String message) { if (!this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private boolean isInUpdateMode() { return !this.isInCreateMode(); } boolean isVirtualMachineModifiedDuringUpdate(VirtualMachineUpdateInner updateParameter) { if (updateParameterSnapshotOnUpdate == null || updateParameter == null) { return true; } else { try { String jsonStrSnapshot = SERIALIZER_ADAPTER.serialize(updateParameterSnapshotOnUpdate, 
SerializerEncoding.JSON); String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON); return !jsonStr.equals(jsonStrSnapshot); } catch (IOException e) { return true; } } } VirtualMachineUpdateInner deepCopyInnerToUpdateParameter() { VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner(); copyInnerToUpdateParameter(updateParameter); try { String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON); updateParameter = SERIALIZER_ADAPTER.deserialize(jsonStr, VirtualMachineUpdateInner.class, SerializerEncoding.JSON); } catch (IOException e) { return null; } if (this.innerModel().identity() != null) { VirtualMachineIdentity identity = new VirtualMachineIdentity(); identity.withType(this.innerModel().identity().type()); updateParameter.withIdentity(identity); } return updateParameter; } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } /** Class to manage Data disk collection. */ private class ManagedDataDiskCollection { private final Map<String, DataDisk> newDisksToAttach = new HashMap<>(); private final List<DataDisk> existingDisksToAttach = new ArrayList<>(); private final List<DataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<DataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineImpl vm; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; ManagedDataDiskCollection(VirtualMachineImpl vm) { this.vm = vm; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDataDisksDefaults() { VirtualMachineInner vmInner = this.vm.innerModel(); if (isPending()) { if (vmInner.storageProfile().dataDisks() == null) { vmInner.storageProfile().withDataDisks(new ArrayList<>()); } List<DataDisk> dataDisks = vmInner.storageProfile().dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksToAttach.values()) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.existingDisksToAttach) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setAttachableNewDataDisks(nextLun); setAttachableExistingDataDisks(nextLun); setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (vmInner.storageProfile().dataDisks() != null && vmInner.storageProfile().dataDisks().size() == 0) { if (vm.isInCreateMode()) { 
vmInner.storageProfile().withDataDisks(null); } } this.clear(); } private void clear() { newDisksToAttach.clear(); existingDisksToAttach.clear(); implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); } private boolean isPending() { return newDisksToAttach.size() > 0 || existingDisksToAttach.size() > 0 || implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void setAttachableNewDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Map.Entry<String, DataDisk> entry : this.newDisksToAttach.entrySet()) { Disk managedDisk = vm.taskResult(entry.getKey()); DataDisk dataDisk = entry.getValue(); dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } dataDisk.withManagedDisk(new ManagedDiskParameters()); dataDisk.managedDisk().withId(managedDisk.id()); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setAttachableExistingDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.existingDisksToAttach) { dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } } /** Class to manage VM boot diagnostics settings. 
*/ private class BootDiagnosticsHandler { private final VirtualMachineImpl vmImpl; private String creatableDiagnosticsStorageAccountKey; private boolean useManagedStorageAccount = false; BootDiagnosticsHandler(VirtualMachineImpl vmImpl) { this.vmImpl = vmImpl; if (isBootDiagnosticsEnabled() && this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } } public boolean isBootDiagnosticsEnabled() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null && this.vmInner().diagnosticsProfile().bootDiagnostics().enabled() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().enabled(); } return false; } public String bootDiagnosticsStorageUri() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri(); } return null; } BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; } BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(creatable); return this; } BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { this.enableDisable(true); this.useManagedStorageAccount = false; this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(storageAccountBlobEndpointUri); return this; } BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); } BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; } void prepare() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } if (this.creatableDiagnosticsStorageAccountKey != null || this.vmImpl.creatableStorageAccountKey != null || this.vmImpl.existingStorageAccountToAssociate != null) { return; } String accountName = this.vmImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmImpl.creatableGroup != null) { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withNewResourceGroup(this.vmImpl.creatableGroup); } else { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withExistingResourceGroup(this.vmImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(storageAccountCreatable); } void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = 
ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.vmImpl.creatableStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(this.vmImpl.creatableStorageAccountKey); } else if (this.vmImpl.existingStorageAccountToAssociate != null) { storageAccount = this.vmImpl.existingStorageAccountToAssociate; } if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmInner() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); } private VirtualMachineInner vmInner() { return this.vmImpl.innerModel(); } private void enableDisable(boolean enable) { if (this.vmInner().diagnosticsProfile() == null) { this.vmInner().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmInner().diagnosticsProfile().bootDiagnostics() == null) { this.vmInner().diagnosticsProfile().withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
Same thoughts on this being made verbose
public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) { logger.warning("Retry is already pending. Ignoring transient error.", throwable); return; } final int attemptsMade = retryAttempts.incrementAndGet(); final int attempts; final Duration retryInterval; if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient()) || (throwable instanceof IllegalStateException) || (throwable instanceof RejectedExecutionException)) { attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries()); final Throwable throwableToUse = throwable instanceof AmqpException ? throwable : new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext); retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts); } else { attempts = attemptsMade; retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts); } if (retryInterval != null) { if (isRetryPending.getAndSet(true)) { retryAttempts.decrementAndGet(); return; } logger.info("Retry retrySubscription = Mono.delay(retryInterval).subscribe(i -> { if (isDisposed()) { logger.info("Retry } else { logger.info("Retry requestUpstream(); isRetryPending.set(false); } }); } else { logger.warning("Retry lastError = throwable; isDisposed.set(true); dispose(); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("Error in AMQP channel processor. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onError(throwable)); } } }
logger.info("Retry
public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) { logger.warning("Retry is already pending. Ignoring transient error.", throwable); return; } final int attemptsMade = retryAttempts.incrementAndGet(); final int attempts; final Duration retryInterval; if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient()) || (throwable instanceof IllegalStateException) || (throwable instanceof RejectedExecutionException)) { attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries()); final Throwable throwableToUse = throwable instanceof AmqpException ? throwable : new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext); retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts); } else { attempts = attemptsMade; retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts); } if (retryInterval != null) { if (isRetryPending.getAndSet(true)) { retryAttempts.decrementAndGet(); return; } logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .addKeyValue(INTERVAL_KEY, retryInterval.toMillis()) .log("Transient error occurred. Retrying.", throwable); retrySubscription = Mono.delay(retryInterval).subscribe(i -> { if (isDisposed()) { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Not requesting from upstream. Processor is disposed."); } else { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Requesting from upstream."); requestUpstream(); isRetryPending.set(false); } }); } else { logger.atWarning() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Retry attempts exhausted or exception was not retriable.", throwable); lastError = throwable; isDisposed.set(true); dispose(); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("Error in AMQP channel processor. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onError(throwable)); } } }
class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable { @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream"); private final ClientLogger logger; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isRequested = new AtomicBoolean(); private final AtomicBoolean isRetryPending = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Object lock = new Object(); private final AmqpRetryPolicy retryPolicy; private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction; private final AmqpErrorContext errorContext; private volatile Subscription upstream; private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>(); private volatile Throwable lastError; private volatile T currentChannel; private volatile Disposable connectionSubscription; private volatile Disposable retrySubscription; public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); /*Map<String, Object> loggingContext = new HashMap<>(); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.")); loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null."));*/ this.logger = Objects.requireNonNull(logger, "'retryPolicy' cannot be null."); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } @Override public void onSubscribe(Subscription subscription) { if (Operators.setOnce(UPSTREAM, this, subscription)) { isRequested.set(true); subscription.request(1); } else { logger.warning("Processors can only be subscribed to once."); } } @Override public void onNext(T amqpChannel) { logger.info("Setting next AMQP channel."); Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null."); final T oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentChannel; oldSubscription = connectionSubscription; currentChannel = amqpChannel; final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; logger.info("Next AMQP channel received, updating {} current subscribers", subscribers.size()); currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel)); connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { retryAttempts.set(0); logger.info("Channel is now active."); } }, error -> { setAndClearChannel(); onError(error); }, () -> { if (isDisposed()) { logger.info("Channel is disposed."); } else { logger.info("Channel is closed. Requesting upstream."); setAndClearChannel(); requestUpstream(); } }); } close(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } isRequested.set(false); } /** * When downstream or upstream encounters an error, calculates whether to request another item upstream. * * @param throwable Exception to analyse. 
*/ @Override @Override public void onComplete() { logger.info("Upstream connection publisher was completed. Terminating processor."); isDisposed.set(true); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("AMQP channel processor completed. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onComplete()); } } @Override public void subscribe(CoreSubscriber<? super T> actual) { if (isDisposed()) { if (lastError != null) { actual.onSubscribe(Operators.emptySubscription()); actual.onError(lastError); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException("Cannot subscribe. Processor is already terminated."))); } return; } final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this); actual.onSubscribe(subscriber); synchronized (lock) { if (currentChannel != null) { subscriber.complete(currentChannel); return; } } subscribers.add(subscriber); logger.atVerbose().addKeyValue("subscribers", subscribers.size()).log("Added a subscriber."); if (!isRetryPending.get()) { requestUpstream(); } } @Override public void dispose() { if (isDisposed.getAndSet(true)) { return; } if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } onComplete(); synchronized (lock) { setAndClearChannel(); } } @Override public boolean isDisposed() { return isDisposed.get(); } private void requestUpstream() { if (currentChannel != null) { logger.verbose("Connection exists, not requesting another."); return; } else if (isDisposed()) { logger.verbose("Is already disposed."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.warning("There is no upstream subscription."); return; } if (!isRequested.getAndSet(true)) { logger.info("Connection not requested, yet. Requesting one."); subscription.request(1); } } private void setAndClearChannel() { T oldChannel; synchronized (lock) { oldChannel = currentChannel; currentChannel = null; } close(oldChannel); } /** * Checks the current state of the channel for this channel and returns true if the channel is null or if this * processor is disposed. * * @return true if the current channel in the processor is null or if the processor is disposed */ public boolean isChannelClosed() { synchronized (lock) { return currentChannel == null || isDisposed(); } } private void close(T channel) { if (channel instanceof AsyncCloseable) { ((AsyncCloseable) channel).closeAsync().subscribe(); } else if (channel instanceof AutoCloseable) { try { ((AutoCloseable) channel).close(); } catch (Exception error) { logger.warning("Error occurred closing AutoCloseable channel.", error); } } else if (channel instanceof Disposable) { try { ((Disposable) channel).dispose(); } catch (Exception error) { logger.warning("Error occurred closing Disposable channel.", error); } } } /** * Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor. * These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor. * The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives * a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified, * which removes itself from the tracking list, then propagates the notification to the wrapped subscriber. 
*/ private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> { private final AmqpChannelProcessor<T> processor; private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) { super(actual); this.processor = processor; } @Override public void cancel() { processor.subscribers.remove(this); super.cancel(); } @Override public void onComplete() { if (!isCancelled()) { processor.subscribers.remove(this); actual.onComplete(); } } @Override public void onNext(T channel) { if (!isCancelled()) { processor.subscribers.remove(this); super.complete(channel); } } @Override public void onError(Throwable throwable) { if (!isCancelled()) { processor.subscribers.remove(this); actual.onError(throwable); } else { Operators.onErrorDropped(throwable, currentContext()); } } } }
class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable { @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream"); private static final String RETRY_NUMBER_KEY = "retry"; private final ClientLogger logger; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isRequested = new AtomicBoolean(); private final AtomicBoolean isRetryPending = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Object lock = new Object(); private final AmqpRetryPolicy retryPolicy; private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction; private final AmqpErrorContext errorContext; private volatile Subscription upstream; private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>(); private volatile Throwable lastError; private volatile T currentChannel; private volatile Disposable connectionSubscription; private volatile Disposable retrySubscription; /** * @deprecated Use constructor overload that does not take {@link ClientLogger} */ @Deprecated public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = Objects.requireNonNull(logger, "'logger' cannot be null."); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } public AmqpChannelProcessor(String fullyQualifiedNamespace, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, Map<String, Object> loggingContext) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null.")); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } @Override public void onSubscribe(Subscription subscription) { if (Operators.setOnce(UPSTREAM, this, subscription)) { isRequested.set(true); subscription.request(1); } else { logger.warning("Processors can only be subscribed to once."); } } @Override public void onNext(T amqpChannel) { logger.info("Setting next AMQP channel."); Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null."); final T oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentChannel; oldSubscription = connectionSubscription; currentChannel = amqpChannel; final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; logger.info("Next AMQP channel received, updating {} current subscribers", subscribers.size()); currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel)); connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { retryAttempts.set(0); logger.info("Channel is now active."); } }, error -> { setAndClearChannel(); onError(error); }, () -> { if (isDisposed()) { logger.info("Channel is 
disposed."); } else { logger.info("Channel is closed. Requesting upstream."); setAndClearChannel(); requestUpstream(); } }); } close(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } isRequested.set(false); } /** * When downstream or upstream encounters an error, calculates whether to request another item upstream. * * @param throwable Exception to analyse. */ @Override @Override public void onComplete() { logger.info("Upstream connection publisher was completed. Terminating processor."); isDisposed.set(true); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("AMQP channel processor completed. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onComplete()); } } @Override public void subscribe(CoreSubscriber<? super T> actual) { if (isDisposed()) { if (lastError != null) { actual.onSubscribe(Operators.emptySubscription()); actual.onError(lastError); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException("Cannot subscribe. Processor is already terminated."))); } return; } final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this); actual.onSubscribe(subscriber); synchronized (lock) { if (currentChannel != null) { subscriber.complete(currentChannel); return; } } subscribers.add(subscriber); logger.atVerbose().addKeyValue("subscribers", subscribers.size()).log("Added a subscriber."); if (!isRetryPending.get()) { requestUpstream(); } } @Override public void dispose() { if (isDisposed.getAndSet(true)) { return; } if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } onComplete(); synchronized (lock) { setAndClearChannel(); } } @Override public boolean isDisposed() { return isDisposed.get(); } private void requestUpstream() { if (currentChannel != null) { logger.verbose("Connection exists, not requesting another."); return; } else if (isDisposed()) { logger.verbose("Is already disposed."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.warning("There is no upstream subscription."); return; } if (!isRequested.getAndSet(true)) { logger.info("Connection not requested, yet. Requesting one."); subscription.request(1); } } private void setAndClearChannel() { T oldChannel; synchronized (lock) { oldChannel = currentChannel; currentChannel = null; } close(oldChannel); } /** * Checks the current state of the channel for this channel and returns true if the channel is null or if this * processor is disposed. * * @return true if the current channel in the processor is null or if the processor is disposed */ public boolean isChannelClosed() { synchronized (lock) { return currentChannel == null || isDisposed(); } } private void close(T channel) { if (channel instanceof AsyncCloseable) { ((AsyncCloseable) channel).closeAsync().subscribe(); } else if (channel instanceof AutoCloseable) { try { ((AutoCloseable) channel).close(); } catch (Exception error) { logger.warning("Error occurred closing AutoCloseable channel.", error); } } else if (channel instanceof Disposable) { try { ((Disposable) channel).dispose(); } catch (Exception error) { logger.warning("Error occurred closing Disposable channel.", error); } } } /** * Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor. 
* These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor. * The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives * a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified, * which removes itself from the tracking list, then propagates the notification to the wrapped subscriber. */ private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> { private final AmqpChannelProcessor<T> processor; private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) { super(actual); this.processor = processor; } @Override public void cancel() { processor.subscribers.remove(this); super.cancel(); } @Override public void onComplete() { if (!isCancelled()) { processor.subscribers.remove(this); actual.onComplete(); } } @Override public void onNext(T channel) { if (!isCancelled()) { processor.subscribers.remove(this); super.complete(channel); } } @Override public void onError(Throwable throwable) { if (!isCancelled()) { processor.subscribers.remove(this); actual.onError(throwable); } else { Operators.onErrorDropped(throwable, currentContext()); } } } }
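The review comment in this record is about how verbose the new fluent ClientLogger calls become when the same addKeyValue chain is repeated at every log site. A minimal sketch, assuming only the azure-core ClientLogger/LoggingEventBuilder API that already appears in the after-snippet; the RetryLoggingSketch class and the infoWithRetry helper are hypothetical names introduced purely for illustration, not part of the actual change:

import com.azure.core.util.logging.ClientLogger;
import com.azure.core.util.logging.LoggingEventBuilder;

final class RetryLoggingSketch {
    // Same key names as in the diff above.
    private static final String RETRY_NUMBER_KEY = "retry";
    private static final String INTERVAL_KEY = "interval_ms";

    private final ClientLogger logger = new ClientLogger(RetryLoggingSketch.class);

    // Hypothetical helper: pre-populates the retry attempt on an info-level builder,
    // so call sites chain only their call-specific key-values.
    private LoggingEventBuilder infoWithRetry(int attempts) {
        return logger.atInfo().addKeyValue(RETRY_NUMBER_KEY, attempts);
    }

    void demo(int attempts, long retryIntervalMillis, Throwable error) {
        // One chained call instead of repeating the addKeyValue(RETRY_NUMBER_KEY, ...) prefix.
        infoWithRetry(attempts)
            .addKeyValue(INTERVAL_KEY, retryIntervalMillis)
            .log("Transient error occurred. Retrying.", error);

        infoWithRetry(attempts).log("Requesting from upstream.");
    }
}

Whether such a helper is worth the indirection is a style call; the sketch only shows that the repetition the reviewer points at could be factored out without changing which key-values are emitted.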
Should this be a warning log as runtime behavior isn't working as expected?
public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) { logger.warning("Retry is already pending. Ignoring transient error.", throwable); return; } final int attemptsMade = retryAttempts.incrementAndGet(); final int attempts; final Duration retryInterval; if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient()) || (throwable instanceof IllegalStateException) || (throwable instanceof RejectedExecutionException)) { attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries()); final Throwable throwableToUse = throwable instanceof AmqpException ? throwable : new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext); retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts); } else { attempts = attemptsMade; retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts); } if (retryInterval != null) { if (isRetryPending.getAndSet(true)) { retryAttempts.decrementAndGet(); return; } logger.info("Retry retrySubscription = Mono.delay(retryInterval).subscribe(i -> { if (isDisposed()) { logger.info("Retry } else { logger.info("Retry requestUpstream(); isRetryPending.set(false); } }); } else { logger.warning("Retry lastError = throwable; isDisposed.set(true); dispose(); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("Error in AMQP channel processor. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onError(throwable)); } } }
logger.info("Retry
public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) { logger.warning("Retry is already pending. Ignoring transient error.", throwable); return; } final int attemptsMade = retryAttempts.incrementAndGet(); final int attempts; final Duration retryInterval; if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient()) || (throwable instanceof IllegalStateException) || (throwable instanceof RejectedExecutionException)) { attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries()); final Throwable throwableToUse = throwable instanceof AmqpException ? throwable : new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext); retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts); } else { attempts = attemptsMade; retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts); } if (retryInterval != null) { if (isRetryPending.getAndSet(true)) { retryAttempts.decrementAndGet(); return; } logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .addKeyValue(INTERVAL_KEY, retryInterval.toMillis()) .log("Transient error occurred. Retrying.", throwable); retrySubscription = Mono.delay(retryInterval).subscribe(i -> { if (isDisposed()) { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Not requesting from upstream. Processor is disposed."); } else { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Requesting from upstream."); requestUpstream(); isRetryPending.set(false); } }); } else { logger.atWarning() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Retry attempts exhausted or exception was not retriable.", throwable); lastError = throwable; isDisposed.set(true); dispose(); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("Error in AMQP channel processor. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onError(throwable)); } } }
class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable { @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream"); private final ClientLogger logger; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isRequested = new AtomicBoolean(); private final AtomicBoolean isRetryPending = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Object lock = new Object(); private final AmqpRetryPolicy retryPolicy; private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction; private final AmqpErrorContext errorContext; private volatile Subscription upstream; private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>(); private volatile Throwable lastError; private volatile T currentChannel; private volatile Disposable connectionSubscription; private volatile Disposable retrySubscription; public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); /*Map<String, Object> loggingContext = new HashMap<>(); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.")); loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null."));*/ this.logger = Objects.requireNonNull(logger, "'retryPolicy' cannot be null."); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } @Override public void onSubscribe(Subscription subscription) { if (Operators.setOnce(UPSTREAM, this, subscription)) { isRequested.set(true); subscription.request(1); } else { logger.warning("Processors can only be subscribed to once."); } } @Override public void onNext(T amqpChannel) { logger.info("Setting next AMQP channel."); Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null."); final T oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentChannel; oldSubscription = connectionSubscription; currentChannel = amqpChannel; final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; logger.info("Next AMQP channel received, updating {} current subscribers", subscribers.size()); currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel)); connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { retryAttempts.set(0); logger.info("Channel is now active."); } }, error -> { setAndClearChannel(); onError(error); }, () -> { if (isDisposed()) { logger.info("Channel is disposed."); } else { logger.info("Channel is closed. Requesting upstream."); setAndClearChannel(); requestUpstream(); } }); } close(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } isRequested.set(false); } /** * When downstream or upstream encounters an error, calculates whether to request another item upstream. * * @param throwable Exception to analyse. 
*/ @Override @Override public void onComplete() { logger.info("Upstream connection publisher was completed. Terminating processor."); isDisposed.set(true); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("AMQP channel processor completed. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onComplete()); } } @Override public void subscribe(CoreSubscriber<? super T> actual) { if (isDisposed()) { if (lastError != null) { actual.onSubscribe(Operators.emptySubscription()); actual.onError(lastError); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException("Cannot subscribe. Processor is already terminated."))); } return; } final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this); actual.onSubscribe(subscriber); synchronized (lock) { if (currentChannel != null) { subscriber.complete(currentChannel); return; } } subscribers.add(subscriber); logger.atVerbose().addKeyValue("subscribers", subscribers.size()).log("Added a subscriber."); if (!isRetryPending.get()) { requestUpstream(); } } @Override public void dispose() { if (isDisposed.getAndSet(true)) { return; } if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } onComplete(); synchronized (lock) { setAndClearChannel(); } } @Override public boolean isDisposed() { return isDisposed.get(); } private void requestUpstream() { if (currentChannel != null) { logger.verbose("Connection exists, not requesting another."); return; } else if (isDisposed()) { logger.verbose("Is already disposed."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.warning("There is no upstream subscription."); return; } if (!isRequested.getAndSet(true)) { logger.info("Connection not requested, yet. Requesting one."); subscription.request(1); } } private void setAndClearChannel() { T oldChannel; synchronized (lock) { oldChannel = currentChannel; currentChannel = null; } close(oldChannel); } /** * Checks the current state of the channel for this channel and returns true if the channel is null or if this * processor is disposed. * * @return true if the current channel in the processor is null or if the processor is disposed */ public boolean isChannelClosed() { synchronized (lock) { return currentChannel == null || isDisposed(); } } private void close(T channel) { if (channel instanceof AsyncCloseable) { ((AsyncCloseable) channel).closeAsync().subscribe(); } else if (channel instanceof AutoCloseable) { try { ((AutoCloseable) channel).close(); } catch (Exception error) { logger.warning("Error occurred closing AutoCloseable channel.", error); } } else if (channel instanceof Disposable) { try { ((Disposable) channel).dispose(); } catch (Exception error) { logger.warning("Error occurred closing Disposable channel.", error); } } } /** * Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor. * These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor. * The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives * a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified, * which removes itself from the tracking list, then propagates the notification to the wrapped subscriber. 
*/ private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> { private final AmqpChannelProcessor<T> processor; private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) { super(actual); this.processor = processor; } @Override public void cancel() { processor.subscribers.remove(this); super.cancel(); } @Override public void onComplete() { if (!isCancelled()) { processor.subscribers.remove(this); actual.onComplete(); } } @Override public void onNext(T channel) { if (!isCancelled()) { processor.subscribers.remove(this); super.complete(channel); } } @Override public void onError(Throwable throwable) { if (!isCancelled()) { processor.subscribers.remove(this); actual.onError(throwable); } else { Operators.onErrorDropped(throwable, currentContext()); } } } }
class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable { @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream"); private static final String RETRY_NUMBER_KEY = "retry"; private final ClientLogger logger; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isRequested = new AtomicBoolean(); private final AtomicBoolean isRetryPending = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Object lock = new Object(); private final AmqpRetryPolicy retryPolicy; private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction; private final AmqpErrorContext errorContext; private volatile Subscription upstream; private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>(); private volatile Throwable lastError; private volatile T currentChannel; private volatile Disposable connectionSubscription; private volatile Disposable retrySubscription; /** * @deprecated Use constructor overload that does not take {@link ClientLogger} */ @Deprecated public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = Objects.requireNonNull(logger, "'logger' cannot be null."); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } public AmqpChannelProcessor(String fullyQualifiedNamespace, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, Map<String, Object> loggingContext) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null.")); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } @Override public void onSubscribe(Subscription subscription) { if (Operators.setOnce(UPSTREAM, this, subscription)) { isRequested.set(true); subscription.request(1); } else { logger.warning("Processors can only be subscribed to once."); } } @Override public void onNext(T amqpChannel) { logger.info("Setting next AMQP channel."); Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null."); final T oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentChannel; oldSubscription = connectionSubscription; currentChannel = amqpChannel; final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; logger.info("Next AMQP channel received, updating {} current subscribers", subscribers.size()); currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel)); connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { retryAttempts.set(0); logger.info("Channel is now active."); } }, error -> { setAndClearChannel(); onError(error); }, () -> { if (isDisposed()) { logger.info("Channel is 
disposed."); } else { logger.info("Channel is closed. Requesting upstream."); setAndClearChannel(); requestUpstream(); } }); } close(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } isRequested.set(false); } /** * When downstream or upstream encounters an error, calculates whether to request another item upstream. * * @param throwable Exception to analyse. */ @Override @Override public void onComplete() { logger.info("Upstream connection publisher was completed. Terminating processor."); isDisposed.set(true); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("AMQP channel processor completed. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onComplete()); } } @Override public void subscribe(CoreSubscriber<? super T> actual) { if (isDisposed()) { if (lastError != null) { actual.onSubscribe(Operators.emptySubscription()); actual.onError(lastError); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException("Cannot subscribe. Processor is already terminated."))); } return; } final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this); actual.onSubscribe(subscriber); synchronized (lock) { if (currentChannel != null) { subscriber.complete(currentChannel); return; } } subscribers.add(subscriber); logger.atVerbose().addKeyValue("subscribers", subscribers.size()).log("Added a subscriber."); if (!isRetryPending.get()) { requestUpstream(); } } @Override public void dispose() { if (isDisposed.getAndSet(true)) { return; } if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } onComplete(); synchronized (lock) { setAndClearChannel(); } } @Override public boolean isDisposed() { return isDisposed.get(); } private void requestUpstream() { if (currentChannel != null) { logger.verbose("Connection exists, not requesting another."); return; } else if (isDisposed()) { logger.verbose("Is already disposed."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.warning("There is no upstream subscription."); return; } if (!isRequested.getAndSet(true)) { logger.info("Connection not requested, yet. Requesting one."); subscription.request(1); } } private void setAndClearChannel() { T oldChannel; synchronized (lock) { oldChannel = currentChannel; currentChannel = null; } close(oldChannel); } /** * Checks the current state of the channel for this channel and returns true if the channel is null or if this * processor is disposed. * * @return true if the current channel in the processor is null or if the processor is disposed */ public boolean isChannelClosed() { synchronized (lock) { return currentChannel == null || isDisposed(); } } private void close(T channel) { if (channel instanceof AsyncCloseable) { ((AsyncCloseable) channel).closeAsync().subscribe(); } else if (channel instanceof AutoCloseable) { try { ((AutoCloseable) channel).close(); } catch (Exception error) { logger.warning("Error occurred closing AutoCloseable channel.", error); } } else if (channel instanceof Disposable) { try { ((Disposable) channel).dispose(); } catch (Exception error) { logger.warning("Error occurred closing Disposable channel.", error); } } } /** * Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor. 
* These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor. * The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives * a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified, * which removes itself from the tracking list, then propagates the notification to the wrapped subscriber. */ private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> { private final AmqpChannelProcessor<T> processor; private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) { super(actual); this.processor = processor; } @Override public void cancel() { processor.subscribers.remove(this); super.cancel(); } @Override public void onComplete() { if (!isCancelled()) { processor.subscribers.remove(this); actual.onComplete(); } } @Override public void onNext(T channel) { if (!isCancelled()) { processor.subscribers.remove(this); super.complete(channel); } } @Override public void onError(Throwable throwable) { if (!isCancelled()) { processor.subscribers.remove(this); actual.onError(throwable); } else { Operators.onErrorDropped(throwable, currentContext()); } } } }
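The question in this record is whether the "processor is disposed" branch should log at warning rather than info, since a pending retry firing after disposal suggests the runtime is not behaving as expected. A minimal sketch of what that branch would emit at warning level, using only the fluent API shown in the diff; the class and method names are placeholders, not code from the actual change:

import com.azure.core.util.logging.ClientLogger;

final class RetryLogLevelSketch {
    private static final String RETRY_NUMBER_KEY = "retry";

    private final ClientLogger logger = new ClientLogger(RetryLogLevelSketch.class);

    // Mirrors the disposed-processor branch from the diff, but promoted from
    // atInfo() to atWarning() as the reviewer suggests.
    void logRetrySkippedBecauseDisposed(int attempts) {
        logger.atWarning()
            .addKeyValue(RETRY_NUMBER_KEY, attempts)
            .log("Not requesting from upstream. Processor is disposed.");
    }
}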
Why were all the `namespace` and `entityPath` key-values removed in the logs in this file?
public void onNext(T amqpChannel) { logger.info("Setting next AMQP channel."); Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null."); final T oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentChannel; oldSubscription = connectionSubscription; currentChannel = amqpChannel; final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; logger.info("Next AMQP channel received, updating {} current subscribers", subscribers.size()); currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel)); connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { retryAttempts.set(0); logger.info("Channel is now active."); } }, error -> { setAndClearChannel(); onError(error); }, () -> { if (isDisposed()) { logger.info("Channel is disposed."); } else { logger.info("Channel is closed. Requesting upstream."); setAndClearChannel(); requestUpstream(); } }); } close(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } isRequested.set(false); }
retryAttempts.set(0);
public void onNext(T amqpChannel) { logger.info("Setting next AMQP channel."); Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null."); final T oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentChannel; oldSubscription = connectionSubscription; currentChannel = amqpChannel; final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; logger.info("Next AMQP channel received, updating {} current subscribers", subscribers.size()); currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel)); connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { retryAttempts.set(0); logger.info("Channel is now active."); } }, error -> { setAndClearChannel(); onError(error); }, () -> { if (isDisposed()) { logger.info("Channel is disposed."); } else { logger.info("Channel is closed. Requesting upstream."); setAndClearChannel(); requestUpstream(); } }); } close(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } isRequested.set(false); }
class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable { @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream"); private final ClientLogger logger; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isRequested = new AtomicBoolean(); private final AtomicBoolean isRetryPending = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Object lock = new Object(); private final AmqpRetryPolicy retryPolicy; private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction; private final AmqpErrorContext errorContext; private volatile Subscription upstream; private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>(); private volatile Throwable lastError; private volatile T currentChannel; private volatile Disposable connectionSubscription; private volatile Disposable retrySubscription; public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); /*Map<String, Object> loggingContext = new HashMap<>(); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null.")); loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null."));*/ this.logger = Objects.requireNonNull(logger, "'retryPolicy' cannot be null."); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } @Override public void onSubscribe(Subscription subscription) { if (Operators.setOnce(UPSTREAM, this, subscription)) { isRequested.set(true); subscription.request(1); } else { logger.warning("Processors can only be subscribed to once."); } } @Override /** * When downstream or upstream encounters an error, calculates whether to request another item upstream. * * @param throwable Exception to analyse. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) { logger.warning("Retry is already pending. Ignoring transient error.", throwable); return; } final int attemptsMade = retryAttempts.incrementAndGet(); final int attempts; final Duration retryInterval; if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient()) || (throwable instanceof IllegalStateException) || (throwable instanceof RejectedExecutionException)) { attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries()); final Throwable throwableToUse = throwable instanceof AmqpException ? 
throwable : new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext); retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts); } else { attempts = attemptsMade; retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts); } if (retryInterval != null) { if (isRetryPending.getAndSet(true)) { retryAttempts.decrementAndGet(); return; } logger.info("Retry retrySubscription = Mono.delay(retryInterval).subscribe(i -> { if (isDisposed()) { logger.info("Retry } else { logger.info("Retry requestUpstream(); isRetryPending.set(false); } }); } else { logger.warning("Retry lastError = throwable; isDisposed.set(true); dispose(); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("Error in AMQP channel processor. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onError(throwable)); } } } @Override public void onComplete() { logger.info("Upstream connection publisher was completed. Terminating processor."); isDisposed.set(true); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("AMQP channel processor completed. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onComplete()); } } @Override public void subscribe(CoreSubscriber<? super T> actual) { if (isDisposed()) { if (lastError != null) { actual.onSubscribe(Operators.emptySubscription()); actual.onError(lastError); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException("Cannot subscribe. Processor is already terminated."))); } return; } final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this); actual.onSubscribe(subscriber); synchronized (lock) { if (currentChannel != null) { subscriber.complete(currentChannel); return; } } subscribers.add(subscriber); logger.atVerbose().addKeyValue("subscribers", subscribers.size()).log("Added a subscriber."); if (!isRetryPending.get()) { requestUpstream(); } } @Override public void dispose() { if (isDisposed.getAndSet(true)) { return; } if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } onComplete(); synchronized (lock) { setAndClearChannel(); } } @Override public boolean isDisposed() { return isDisposed.get(); } private void requestUpstream() { if (currentChannel != null) { logger.verbose("Connection exists, not requesting another."); return; } else if (isDisposed()) { logger.verbose("Is already disposed."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.warning("There is no upstream subscription."); return; } if (!isRequested.getAndSet(true)) { logger.info("Connection not requested, yet. Requesting one."); subscription.request(1); } } private void setAndClearChannel() { T oldChannel; synchronized (lock) { oldChannel = currentChannel; currentChannel = null; } close(oldChannel); } /** * Checks the current state of the channel for this channel and returns true if the channel is null or if this * processor is disposed. 
* * @return true if the current channel in the processor is null or if the processor is disposed */ public boolean isChannelClosed() { synchronized (lock) { return currentChannel == null || isDisposed(); } } private void close(T channel) { if (channel instanceof AsyncCloseable) { ((AsyncCloseable) channel).closeAsync().subscribe(); } else if (channel instanceof AutoCloseable) { try { ((AutoCloseable) channel).close(); } catch (Exception error) { logger.warning("Error occurred closing AutoCloseable channel.", error); } } else if (channel instanceof Disposable) { try { ((Disposable) channel).dispose(); } catch (Exception error) { logger.warning("Error occurred closing Disposable channel.", error); } } } /** * Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor. * These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor. * The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives * a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified, * which removes itself from the tracking list, then propagates the notification to the wrapped subscriber. */ private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> { private final AmqpChannelProcessor<T> processor; private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) { super(actual); this.processor = processor; } @Override public void cancel() { processor.subscribers.remove(this); super.cancel(); } @Override public void onComplete() { if (!isCancelled()) { processor.subscribers.remove(this); actual.onComplete(); } } @Override public void onNext(T channel) { if (!isCancelled()) { processor.subscribers.remove(this); super.complete(channel); } } @Override public void onError(Throwable throwable) { if (!isCancelled()) { processor.subscribers.remove(this); actual.onError(throwable); } else { Operators.onErrorDropped(throwable, currentContext()); } } } }
class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable { @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream"); private static final String RETRY_NUMBER_KEY = "retry"; private final ClientLogger logger; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isRequested = new AtomicBoolean(); private final AtomicBoolean isRetryPending = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Object lock = new Object(); private final AmqpRetryPolicy retryPolicy; private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction; private final AmqpErrorContext errorContext; private volatile Subscription upstream; private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>(); private volatile Throwable lastError; private volatile T currentChannel; private volatile Disposable connectionSubscription; private volatile Disposable retrySubscription; /** * @deprecated Use constructor overload that does not take {@link ClientLogger} */ @Deprecated public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = Objects.requireNonNull(logger, "'logger' cannot be null."); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } public AmqpChannelProcessor(String fullyQualifiedNamespace, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, Map<String, Object> loggingContext) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null.")); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } @Override public void onSubscribe(Subscription subscription) { if (Operators.setOnce(UPSTREAM, this, subscription)) { isRequested.set(true); subscription.request(1); } else { logger.warning("Processors can only be subscribed to once."); } } @Override /** * When downstream or upstream encounters an error, calculates whether to request another item upstream. * * @param throwable Exception to analyse. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) { logger.warning("Retry is already pending. 
Ignoring transient error.", throwable); return; } final int attemptsMade = retryAttempts.incrementAndGet(); final int attempts; final Duration retryInterval; if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient()) || (throwable instanceof IllegalStateException) || (throwable instanceof RejectedExecutionException)) { attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries()); final Throwable throwableToUse = throwable instanceof AmqpException ? throwable : new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext); retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts); } else { attempts = attemptsMade; retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts); } if (retryInterval != null) { if (isRetryPending.getAndSet(true)) { retryAttempts.decrementAndGet(); return; } logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .addKeyValue(INTERVAL_KEY, retryInterval.toMillis()) .log("Transient error occurred. Retrying.", throwable); retrySubscription = Mono.delay(retryInterval).subscribe(i -> { if (isDisposed()) { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Not requesting from upstream. Processor is disposed."); } else { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Requesting from upstream."); requestUpstream(); isRetryPending.set(false); } }); } else { logger.atWarning() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Retry attempts exhausted or exception was not retriable.", throwable); lastError = throwable; isDisposed.set(true); dispose(); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("Error in AMQP channel processor. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onError(throwable)); } } } @Override public void onComplete() { logger.info("Upstream connection publisher was completed. Terminating processor."); isDisposed.set(true); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("AMQP channel processor completed. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onComplete()); } } @Override public void subscribe(CoreSubscriber<? super T> actual) { if (isDisposed()) { if (lastError != null) { actual.onSubscribe(Operators.emptySubscription()); actual.onError(lastError); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException("Cannot subscribe. 
Processor is already terminated."))); } return; } final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this); actual.onSubscribe(subscriber); synchronized (lock) { if (currentChannel != null) { subscriber.complete(currentChannel); return; } } subscribers.add(subscriber); logger.atVerbose().addKeyValue("subscribers", subscribers.size()).log("Added a subscriber."); if (!isRetryPending.get()) { requestUpstream(); } } @Override public void dispose() { if (isDisposed.getAndSet(true)) { return; } if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } onComplete(); synchronized (lock) { setAndClearChannel(); } } @Override public boolean isDisposed() { return isDisposed.get(); } private void requestUpstream() { if (currentChannel != null) { logger.verbose("Connection exists, not requesting another."); return; } else if (isDisposed()) { logger.verbose("Is already disposed."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.warning("There is no upstream subscription."); return; } if (!isRequested.getAndSet(true)) { logger.info("Connection not requested, yet. Requesting one."); subscription.request(1); } } private void setAndClearChannel() { T oldChannel; synchronized (lock) { oldChannel = currentChannel; currentChannel = null; } close(oldChannel); } /** * Checks the current state of the channel for this channel and returns true if the channel is null or if this * processor is disposed. * * @return true if the current channel in the processor is null or if the processor is disposed */ public boolean isChannelClosed() { synchronized (lock) { return currentChannel == null || isDisposed(); } } private void close(T channel) { if (channel instanceof AsyncCloseable) { ((AsyncCloseable) channel).closeAsync().subscribe(); } else if (channel instanceof AutoCloseable) { try { ((AutoCloseable) channel).close(); } catch (Exception error) { logger.warning("Error occurred closing AutoCloseable channel.", error); } } else if (channel instanceof Disposable) { try { ((Disposable) channel).dispose(); } catch (Exception error) { logger.warning("Error occurred closing Disposable channel.", error); } } } /** * Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor. * These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor. * The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives * a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified, * which removes itself from the tracking list, then propagates the notification to the wrapped subscriber. */ private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> { private final AmqpChannelProcessor<T> processor; private ChannelSubscriber(CoreSubscriber<? 
super T> actual, AmqpChannelProcessor<T> processor) { super(actual); this.processor = processor; } @Override public void cancel() { processor.subscribers.remove(this); super.cancel(); } @Override public void onComplete() { if (!isCancelled()) { processor.subscribers.remove(this); actual.onComplete(); } } @Override public void onNext(T channel) { if (!isCancelled()) { processor.subscribers.remove(this); super.complete(channel); } } @Override public void onError(Throwable throwable) { if (!isCancelled()) { processor.subscribers.remove(this); actual.onError(throwable); } else { Operators.onErrorDropped(throwable, currentContext()); } } } }
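The onError path in the processor above classifies a failure before asking the retry policy for a delay: transient AMQP errors (plus a couple of runtime exceptions) are retried with the attempt count capped at the policy's maximum, and anything else terminates the processor and notifies waiting subscribers. A minimal, self-contained sketch of that classify-then-backoff decision follows; the names and the simple exponential delay are illustrative stand-ins for AmqpRetryPolicy, not the SDK's actual implementation.

```java
import java.time.Duration;
import java.util.concurrent.RejectedExecutionException;

// Illustrative only: simplified transient-vs-terminal decision with capped exponential backoff.
public final class RetryDecisionSketch {

    // Stand-in for AmqpRetryPolicy#calculateRetryDelay: null means "do not retry".
    static Duration retryDelay(Throwable error, int attempt, int maxRetries) {
        if (attempt > maxRetries || !isTransient(error)) {
            return null;
        }
        // Exponential backoff: 1s, 2s, 4s, ... while attempts remain under the cap.
        return Duration.ofSeconds(1L << (attempt - 1));
    }

    static boolean isTransient(Throwable error) {
        // Mirrors the spirit of the check above: certain runtime exceptions are treated as retriable.
        return error instanceof IllegalStateException
            || error instanceof RejectedExecutionException;
    }

    public static void main(String[] args) {
        System.out.println(retryDelay(new IllegalStateException("link detached"), 2, 3)); // PT2S
        System.out.println(retryDelay(new IllegalArgumentException("bad input"), 1, 3));  // null
    }
}
```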
```suggestion
logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e);
```
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
logger.warning("IOException while scheduling closeConnection work. Manually disposing", e);
Mono<Void> closeAsync(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal).log("Disposing of ReactorConnection."); final Sinks.EmitResult result = shutdownSignalSink.tryEmitValue(shutdownSignal); if (result.isFailure()) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(EMIT_RESULT_KEY, result) .log("Unable to emit shutdown signal."); } final Mono<Void> cbsCloseOperation; if (cbsChannelProcessor != null) { cbsCloseOperation = cbsChannelProcessor.flatMap(channel -> channel.closeAsync()); } else { cbsCloseOperation = Mono.empty(); } final Mono<Void> managementNodeCloseOperations = Mono.when( Flux.fromStream(managementNodes.values().stream()).flatMap(node -> node.closeAsync())); final Mono<Void> closeReactor = Mono.fromRunnable(() -> { logger.verbose("Scheduling closeConnection work."); final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher(); if (dispatcher != null) { try { dispatcher.invoke(() -> closeConnectionWork()); } catch (IOException e) { logger.warning("IOException while scheduling closeConnection work. Manually disposing.", e); closeConnectionWork(); } catch (RejectedExecutionException e) { logger.info("Could not schedule closeConnection work. Manually disposing."); closeConnectionWork(); } } else { closeConnectionWork(); } }); return Mono.whenDelayError( cbsCloseOperation.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed CBS node.")), managementNodeCloseOperations.doFinally(signalType -> logger.atVerbose() .log("Closed management nodes."))) .then(closeReactor.doFinally(signalType -> logger.atVerbose() .addKeyValue(SIGNAL_TYPE_KEY, signalType) .log("Closed reactor dispatcher."))) .then(isClosedMono.asMono()); }
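As a side note on the method above: the close path tries to schedule closeConnectionWork on the reactor dispatcher and falls back to running it inline when scheduling fails. Below is a minimal, self-contained sketch of that schedule-or-fallback pattern; the WorkDispatcher interface and names are hypothetical stand-ins for ReactorDispatcher, and the real code logs IOException and RejectedExecutionException in separate catch blocks.

```java
import java.io.IOException;
import java.util.concurrent.RejectedExecutionException;

// Hypothetical stand-in for a dispatcher that may fail or refuse to schedule work.
interface WorkDispatcher {
    void invoke(Runnable work) throws IOException;
}

public final class ScheduleOrFallback {

    // Tries to run the cleanup on the dispatcher thread; if scheduling fails,
    // runs it inline so the connection still gets torn down.
    static void scheduleClose(WorkDispatcher dispatcher, Runnable closeWork) {
        if (dispatcher == null) {
            closeWork.run();
            return;
        }
        try {
            dispatcher.invoke(closeWork);
        } catch (IOException | RejectedExecutionException e) {
            // Mirrors the fallback above: report the failure, then dispose manually.
            System.err.println("Could not schedule close work. Running inline. " + e);
            closeWork.run();
        }
    }

    public static void main(String[] args) {
        // A dispatcher that always rejects, to exercise the fallback path.
        WorkDispatcher rejecting = work -> {
            throw new RejectedExecutionException("shutting down");
        };
        scheduleClose(rejecting, () -> System.out.println("closeConnectionWork ran inline"));
    }
}
```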
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, String.format( "Error occurred while connection was starting. Error: %s", error))).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return monoError(logger.atError().addKeyValue(ENTITY_PATH_KEY, entityPath), new IllegalStateException("Connection is disposed. 
Cannot get management instance.")); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null.")); return createChannel .subscribeWith(new AmqpChannelProcessor<>(connectionId, entityPath, channel -> channel.getEndpointStates(), retryPolicy, new ClientLogger(RequestResponseChannel.class, loggingContext))); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
class ReactorConnection implements AmqpConnection { private static final String CBS_SESSION_NAME = "cbs-session"; private static final String CBS_ADDRESS = "$cbs"; private static final String CBS_LINK_NAME = "cbs"; private static final String MANAGEMENT_SESSION_NAME = "mgmt-session"; private static final String MANAGEMENT_ADDRESS = "$management"; private static final String MANAGEMENT_LINK_NAME = "mgmt"; private final ClientLogger logger; private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, AmqpManagementNode> managementNodes = new ConcurrentHashMap<>(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.One<AmqpShutdownSignal> shutdownSignalSink = Sinks.one(); private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final String connectionId; private final Mono<Connection> connectionMono; private final ConnectionHandler handler; private final ReactorHandlerProvider handlerProvider; private final TokenManagerProvider tokenManagerProvider; private final MessageSerializer messageSerializer; private final ConnectionOptions connectionOptions; private final ReactorProvider reactorProvider; private final AmqpRetryPolicy retryPolicy; private final SenderSettleMode senderSettleMode; private final ReceiverSettleMode receiverSettleMode; private final Duration operationTimeout; private final Composite subscriptions; private ReactorExecutor executor; private volatile ClaimsBasedSecurityChannel cbsChannel; private volatile AmqpChannelProcessor<RequestResponseChannel> cbsChannelProcessor; private volatile Connection connection; /** * Creates a new AMQP connection that uses proton-j. * * @param connectionId Identifier for the connection. * @param connectionOptions A set of options used to create the AMQP connection. * @param reactorProvider Provides proton-j Reactor instances. * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events. * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node. * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}. * @param senderSettleMode to set as {@link SenderSettleMode} on sender. * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver. 
*/ public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) { this.connectionOptions = connectionOptions; this.reactorProvider = reactorProvider; this.connectionId = connectionId; this.logger = new ClientLogger(ReactorConnection.class, createContextWithConnectionId(connectionId)); this.handlerProvider = handlerProvider; this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider, "'tokenManagerProvider' cannot be null."); this.messageSerializer = messageSerializer; this.handler = handlerProvider.createConnectionHandler(connectionId, connectionOptions); this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry()); this.operationTimeout = connectionOptions.getRetry().getTryTimeout(); this.senderSettleMode = senderSettleMode; this.receiverSettleMode = receiverSettleMode; this.connectionMono = Mono.fromCallable(this::getOrCreateConnection) .flatMap(reactorConnection -> { final Mono<AmqpEndpointState> activeEndpoint = getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(operationTimeout, Mono.error(new AmqpException(true, String.format( "Connection '%s' not opened within AmqpRetryOptions.tryTimeout(): %s", connectionId, operationTimeout), handler.getErrorContext()))); return activeEndpoint.thenReturn(reactorConnection); }) .doOnError(error -> { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already disposed: Error occurred while connection was starting.", error); } else { closeAsync(new AmqpShutdownSignal(false, false, "Error occurred while connection was starting. Error: " + error)).subscribe(); } }); this.endpointStates = this.handler.getEndpointStates() .takeUntilOther(shutdownSignalSink.asMono()) .map(state -> { logger.verbose("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .onErrorResume(error -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to error."); return closeAsync(new AmqpShutdownSignal(false, false, error.getMessage())).then(Mono.error(error)); } else { return Mono.error(error); } }) .doOnComplete(() -> { if (!isDisposed.getAndSet(true)) { logger.verbose("Disposing of active sessions due to connection close."); closeAsync(new AmqpShutdownSignal(false, false, "Connection handler closed.")).subscribe(); } }) .cache(1); this.subscriptions = Disposables.composite(this.endpointStates.subscribe()); } /** * {@inheritDoc} */ @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } /** * Gets the shutdown signal associated with this connection. When it emits, the underlying connection is closed. * * @return Shutdown signals associated with this connection. It emits a signal when the underlying connection is * closed. */ @Override public Flux<AmqpShutdownSignal> getShutdownSignals() { return shutdownSignalSink.asMono().cache().flux(); } @Override public Mono<AmqpManagementNode> getManagementNode(String entityPath) { return Mono.defer(() -> { if (isDisposed()) { return Mono.error(logger.atError() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(Exceptions.propagate(new IllegalStateException("Connection is disposed. 
Cannot get management instance.")))); } final AmqpManagementNode existing = managementNodes.get(entityPath); if (existing != null) { return Mono.just(existing); } final TokenManager tokenManager = new AzureTokenManagerProvider(connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()) .getTokenManager(getClaimsBasedSecurityNode(), entityPath); return tokenManager.authorize().thenReturn(managementNodes.compute(entityPath, (key, current) -> { if (current != null) { logger.info("A management node exists already, returning it."); tokenManager.close(); return current; } final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME; final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME; final String address = entityPath + "/" + MANAGEMENT_ADDRESS; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue("address", address) .log("Creating management node."); final AmqpChannelProcessor<RequestResponseChannel> requestResponseChannel = createRequestResponseChannel(sessionName, linkName, address); return new ManagementChannel(requestResponseChannel, getFullyQualifiedNamespace(), entityPath, tokenManager); })); }); } /** * {@inheritDoc} */ @Override public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() { return connectionMono.then(Mono.fromCallable(() -> getOrCreateCBSNode())); } @Override public String getId() { return connectionId; } /** * {@inheritDoc} */ @Override public String getFullyQualifiedNamespace() { return handler.getHostname(); } /** * {@inheritDoc} */ @Override public int getMaxFrameSize() { return handler.getMaxFrameSize(); } /** * {@inheritDoc} */ @Override public Map<String, Object> getConnectionProperties() { return handler.getConnectionProperties(); } /** * {@inheritDoc} */ @Override public Mono<AmqpSession> createSession(String sessionName) { return connectionMono.map(connection -> { final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> { final SessionHandler sessionHandler = handlerProvider.createSessionHandler(connectionId, getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout()); final Session session = connection.session(); BaseHandler.setHandler(session, sessionHandler); final AmqpSession amqpSession = createSession(key, session, sessionHandler); final Disposable subscription = amqpSession.getEndpointStates() .subscribe(state -> { }, error -> { if (isDisposed.get()) { return; } logger.atInfo() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Error occurred. Removing and disposing session", error); removeSession(key); }, () -> { if (isDisposed.get()) { return; } logger.atVerbose() .addKeyValue(SESSION_NAME_KEY, sessionName) .log("Complete. Removing and disposing session."); removeSession(key); }); return new SessionSubscription(amqpSession, subscription); }); return sessionSubscription; }).flatMap(sessionSubscription -> { final Mono<AmqpEndpointState> activeSession = sessionSubscription.getSession().getEndpointStates() .filter(state -> state == AmqpEndpointState.ACTIVE) .next() .timeout(retryPolicy.getRetryOptions().getTryTimeout(), Mono.error(new AmqpException(true, String.format("connectionId[%s] sessionName[%s] Timeout waiting for session to be active.", connectionId, sessionName), handler.getErrorContext()))); return activeSession.thenReturn(sessionSubscription.getSession()); }); } /** * Creates a new AMQP session with the given parameters. 
* * @param sessionName Name of the AMQP session. * @param session The reactor session associated with this session. * @param handler Session handler for the reactor session. * * @return A new instance of AMQP session. */ protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) { return new ReactorSession(this, session, handler, sessionName, reactorProvider, handlerProvider, getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer, connectionOptions.getRetry()); } /** * {@inheritDoc} */ @Override public boolean removeSession(String sessionName) { if (sessionName == null) { return false; } final SessionSubscription removed = sessionMap.remove(sessionName); if (removed != null) { removed.dispose(); } return removed != null; } @Override public boolean isDisposed() { return isDisposed.get(); } /** * {@inheritDoc} */ @Override public void dispose() { final Duration timeout = operationTimeout.plus(operationTimeout); closeAsync().block(timeout); } /** * Gets the active AMQP connection for this instance. * * @return The AMQP connection. * * @throws AmqpException if the {@link Connection} was not transitioned to an active state within the given * {@link AmqpRetryOptions */ protected Mono<Connection> getReactorConnection() { return connectionMono; } /** * Creates a bidirectional link between the message broker and the client. * * @param sessionName Name of the session. * @param linkName Name of the link. * @param entityPath Address to the message broker. * * @return A new {@link RequestResponseChannel} to communicate with the message broker. */ protected AmqpChannelProcessor<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName, String entityPath) { Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); final Flux<RequestResponseChannel> createChannel = createSession(sessionName) .cast(ReactorSession.class) .map(reactorSession -> new RequestResponseChannel(this, getId(), getFullyQualifiedNamespace(), linkName, entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider, messageSerializer, senderSettleMode, receiverSettleMode)) .doOnNext(e -> { logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(LINK_NAME_KEY, linkName) .log("Emitting new response channel."); }) .repeat(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(ENTITY_PATH_KEY, entityPath); return createChannel .subscribeWith(new AmqpChannelProcessor<>(getFullyQualifiedNamespace(), channel -> channel.getEndpointStates(), retryPolicy, loggingContext)); } @Override public Mono<Void> closeAsync() { if (isDisposed.getAndSet(true)) { logger.verbose("Connection was already closed. Not disposing again."); return isClosedMono.asMono(); } return closeAsync(new AmqpShutdownSignal(false, true, "Disposed by client.")); } private synchronized void closeConnectionWork() { if (connection == null) { isClosedMono.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atInfo(), signalType, emitResult) .log("Unable to complete closeMono."); return false; }); return; } connection.close(); handler.close(); final ArrayList<Mono<Void>> closingSessions = new ArrayList<>(); sessionMap.values().forEach(link -> closingSessions.add(link.isClosed())); final Mono<Void> closedExecutor = executor != null ? 
Mono.defer(() -> { synchronized (this) { logger.info("Closing executor."); return executor.closeAsync(); } }) : Mono.empty(); final Mono<Void> closeSessionAndExecutorMono = Mono.when(closingSessions) .timeout(operationTimeout) .onErrorResume(error -> { logger.info("Timed out waiting for all sessions to close."); return Mono.empty(); }) .then(closedExecutor) .then(Mono.fromRunnable(() -> { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit connection closed signal."); return false; }); subscriptions.dispose(); })); subscriptions.add(closeSessionAndExecutorMono.subscribe()); } private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() { if (cbsChannel == null) { logger.info("Setting CBS channel."); cbsChannelProcessor = createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS); cbsChannel = new ClaimsBasedSecurityChannel( cbsChannelProcessor, connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(), connectionOptions.getRetry()); } return cbsChannel; } private synchronized Connection getOrCreateConnection() throws IOException { if (connection == null) { logger.atInfo() .addKeyValue(HOSTNAME_KEY, handler.getHostname()) .addKeyValue("port", handler.getProtocolPort()) .log("Creating and starting connection."); final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize()); connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler); final ReactorExceptionHandler reactorExceptionHandler = new ReactorExceptionHandler(); final Duration timeoutDivided = connectionOptions.getRetry().getTryTimeout().dividedBy(2); final Duration pendingTasksDuration = ClientConstants.SERVER_BUSY_WAIT_TIME.compareTo(timeoutDivided) < 0 ? ClientConstants.SERVER_BUSY_WAIT_TIME : timeoutDivided; final Scheduler scheduler = Schedulers.newSingle("reactor-executor"); executor = new ReactorExecutor(reactor, scheduler, connectionId, reactorExceptionHandler, pendingTasksDuration, connectionOptions.getFullyQualifiedNamespace()); final Mono<Void> executorCloseMono = Mono.defer(() -> { synchronized (this) { return executor.closeAsync(); } }); reactorProvider.getReactorDispatcher().getShutdownSignal() .flatMap(signal -> { reactorExceptionHandler.onConnectionShutdown(signal); return executorCloseMono; }) .onErrorResume(error -> { reactorExceptionHandler.onConnectionError(error); return executorCloseMono; }) .subscribe(); executor.start(); } return connection; } private final class ReactorExceptionHandler extends AmqpExceptionHandler { private ReactorExceptionHandler() { super(); } @Override public void onConnectionError(Throwable exception) { logger.atInfo() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionError, Starting new reactor", exception); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onReactorError: Disposing."); closeAsync(new AmqpShutdownSignal(false, false, "onReactorError: " + exception.toString())) .subscribe(); } } @Override void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) { addShutdownSignal(logger.atInfo(), shutdownSignal) .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown. 
Shutting down."); if (!isDisposed.getAndSet(true)) { logger.atVerbose() .addKeyValue(FULLY_QUALIFIED_NAMESPACE_KEY, getFullyQualifiedNamespace()) .log("onConnectionShutdown: disposing."); closeAsync(shutdownSignal).subscribe(); } } } private static final class SessionSubscription { private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AmqpSession session; private final Disposable subscription; private SessionSubscription(AmqpSession session, Disposable subscription) { this.session = session; this.subscription = subscription; } private AmqpSession getSession() { return session; } private void dispose() { if (isDisposed.getAndSet(true)) { return; } if (session instanceof ReactorSession) { ((ReactorSession) session).closeAsync("Closing session.", null, true) .subscribe(); } else { session.dispose(); } subscription.dispose(); } private Mono<Void> isClosed() { if (session instanceof ReactorSession) { return ((ReactorSession) session).isClosed(); } else { return Mono.empty(); } } } }
I believe one of the reasons we were passing the logger was to use the logger instance associated with the "concrete type" (the class deriving from the generic AmqpChannelProcessor), so that the concrete class name appears in the log lines.
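A minimal, hypothetical sketch of this point, using java.util.logging.Logger as a stand-in for ClientLogger: when the base-class constructor builds its logger from getClass() (as the updated overload shown below does), a subclass instance logs under the subclass's name rather than the base class's. The BaseProcessor/RequestResponseProcessor names are illustrative, not from the SDK.

```java
import java.util.logging.Logger;

// Illustrative only: shows why building the logger from getClass() in a base-class
// constructor preserves the concrete subclass name.
class BaseProcessor {
    final Logger logger;

    BaseProcessor() {
        // getClass() is virtual: for a subclass instance it returns the subclass.
        this.logger = Logger.getLogger(getClass().getName());
    }
}

class RequestResponseProcessor extends BaseProcessor {
}

public final class LoggerNameDemo {
    public static void main(String[] args) {
        BaseProcessor processor = new RequestResponseProcessor();
        // Prints "RequestResponseProcessor", not "BaseProcessor".
        System.out.println(processor.logger.getName());
    }
}
```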
public AmqpChannelProcessor(String fullyQualifiedNamespace, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, Map<String, Object> loggingContext) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = new ClientLogger(AmqpChannelProcessor.class, Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null.")); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); }
this.logger = new ClientLogger(AmqpChannelProcessor.class, Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null."));
public AmqpChannelProcessor(String fullyQualifiedNamespace, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, Map<String, Object> loggingContext) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null.")); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); }
class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable { @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream"); private static final String RETRY_NUMBER_KEY = "retry"; private final ClientLogger logger; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isRequested = new AtomicBoolean(); private final AtomicBoolean isRetryPending = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Object lock = new Object(); private final AmqpRetryPolicy retryPolicy; private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction; private final AmqpErrorContext errorContext; private volatile Subscription upstream; private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>(); private volatile Throwable lastError; private volatile T currentChannel; private volatile Disposable connectionSubscription; private volatile Disposable retrySubscription; /** * @deprecated Use constructor overload that does not take {@link ClientLogger} */ @Deprecated public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = Objects.requireNonNull(logger, "'logger' cannot be null."); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } @Override public void onSubscribe(Subscription subscription) { if (Operators.setOnce(UPSTREAM, this, subscription)) { isRequested.set(true); subscription.request(1); } else { logger.warning("Processors can only be subscribed to once."); } } @Override public void onNext(T amqpChannel) { logger.info("Setting next AMQP channel."); Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null."); final T oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentChannel; oldSubscription = connectionSubscription; currentChannel = amqpChannel; final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; logger.info("Next AMQP channel received, updating {} current subscribers", subscribers.size()); currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel)); connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { retryAttempts.set(0); logger.info("Channel is now active."); } }, error -> { setAndClearChannel(); onError(error); }, () -> { if (isDisposed()) { logger.info("Channel is disposed."); } else { logger.info("Channel is closed. Requesting upstream."); setAndClearChannel(); requestUpstream(); } }); } close(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } isRequested.set(false); } /** * When downstream or upstream encounters an error, calculates whether to request another item upstream. * * @param throwable Exception to analyse. 
*/ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) { logger.warning("Retry is already pending. Ignoring transient error.", throwable); return; } final int attemptsMade = retryAttempts.incrementAndGet(); final int attempts; final Duration retryInterval; if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient()) || (throwable instanceof IllegalStateException) || (throwable instanceof RejectedExecutionException)) { attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries()); final Throwable throwableToUse = throwable instanceof AmqpException ? throwable : new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext); retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts); } else { attempts = attemptsMade; retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts); } if (retryInterval != null) { if (isRetryPending.getAndSet(true)) { retryAttempts.decrementAndGet(); return; } logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .addKeyValue(INTERVAL_KEY, retryInterval.toMillis()) .log("Transient error occurred. Retrying.", throwable); retrySubscription = Mono.delay(retryInterval).subscribe(i -> { if (isDisposed()) { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Not requesting from upstream. Processor is disposed."); } else { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Requesting from upstream."); requestUpstream(); isRetryPending.set(false); } }); } else { logger.atWarning() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Retry attempts exhausted or exception was not retriable.", throwable); lastError = throwable; isDisposed.set(true); dispose(); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("Error in AMQP channel processor. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onError(throwable)); } } } @Override public void onComplete() { logger.info("Upstream connection publisher was completed. Terminating processor."); isDisposed.set(true); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("AMQP channel processor completed. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onComplete()); } } @Override public void subscribe(CoreSubscriber<? super T> actual) { if (isDisposed()) { if (lastError != null) { actual.onSubscribe(Operators.emptySubscription()); actual.onError(lastError); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException("Cannot subscribe. 
Processor is already terminated."))); } return; } final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this); actual.onSubscribe(subscriber); synchronized (lock) { if (currentChannel != null) { subscriber.complete(currentChannel); return; } } subscribers.add(subscriber); logger.atVerbose().addKeyValue("subscribers", subscribers.size()).log("Added a subscriber."); if (!isRetryPending.get()) { requestUpstream(); } } @Override public void dispose() { if (isDisposed.getAndSet(true)) { return; } if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } onComplete(); synchronized (lock) { setAndClearChannel(); } } @Override public boolean isDisposed() { return isDisposed.get(); } private void requestUpstream() { if (currentChannel != null) { logger.verbose("Connection exists, not requesting another."); return; } else if (isDisposed()) { logger.verbose("Is already disposed."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.warning("There is no upstream subscription."); return; } if (!isRequested.getAndSet(true)) { logger.info("Connection not requested, yet. Requesting one."); subscription.request(1); } } private void setAndClearChannel() { T oldChannel; synchronized (lock) { oldChannel = currentChannel; currentChannel = null; } close(oldChannel); } /** * Checks the current state of the channel for this channel and returns true if the channel is null or if this * processor is disposed. * * @return true if the current channel in the processor is null or if the processor is disposed */ public boolean isChannelClosed() { synchronized (lock) { return currentChannel == null || isDisposed(); } } private void close(T channel) { if (channel instanceof AsyncCloseable) { ((AsyncCloseable) channel).closeAsync().subscribe(); } else if (channel instanceof AutoCloseable) { try { ((AutoCloseable) channel).close(); } catch (Exception error) { logger.warning("Error occurred closing AutoCloseable channel.", error); } } else if (channel instanceof Disposable) { try { ((Disposable) channel).dispose(); } catch (Exception error) { logger.warning("Error occurred closing Disposable channel.", error); } } } /** * Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor. * These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor. * The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives * a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified, * which removes itself from the tracking list, then propagates the notification to the wrapped subscriber. */ private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> { private final AmqpChannelProcessor<T> processor; private ChannelSubscriber(CoreSubscriber<? 
super T> actual, AmqpChannelProcessor<T> processor) { super(actual); this.processor = processor; } @Override public void cancel() { processor.subscribers.remove(this); super.cancel(); } @Override public void onComplete() { if (!isCancelled()) { processor.subscribers.remove(this); actual.onComplete(); } } @Override public void onNext(T channel) { if (!isCancelled()) { processor.subscribers.remove(this); super.complete(channel); } } @Override public void onError(Throwable throwable) { if (!isCancelled()) { processor.subscribers.remove(this); actual.onError(throwable); } else { Operators.onErrorDropped(throwable, currentContext()); } } } }
class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable { @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream"); private static final String RETRY_NUMBER_KEY = "retry"; private final ClientLogger logger; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isRequested = new AtomicBoolean(); private final AtomicBoolean isRetryPending = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Object lock = new Object(); private final AmqpRetryPolicy retryPolicy; private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction; private final AmqpErrorContext errorContext; private volatile Subscription upstream; private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>(); private volatile Throwable lastError; private volatile T currentChannel; private volatile Disposable connectionSubscription; private volatile Disposable retrySubscription; /** * @deprecated Use constructor overload that does not take {@link ClientLogger} */ @Deprecated public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath, Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) { this.endpointStatesFunction = Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.logger = Objects.requireNonNull(logger, "'logger' cannot be null."); this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace); } @Override public void onSubscribe(Subscription subscription) { if (Operators.setOnce(UPSTREAM, this, subscription)) { isRequested.set(true); subscription.request(1); } else { logger.warning("Processors can only be subscribed to once."); } } @Override public void onNext(T amqpChannel) { logger.info("Setting next AMQP channel."); Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null."); final T oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentChannel; oldSubscription = connectionSubscription; currentChannel = amqpChannel; final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; logger.info("Next AMQP channel received, updating {} current subscribers", subscribers.size()); currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel)); connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { retryAttempts.set(0); logger.info("Channel is now active."); } }, error -> { setAndClearChannel(); onError(error); }, () -> { if (isDisposed()) { logger.info("Channel is disposed."); } else { logger.info("Channel is closed. Requesting upstream."); setAndClearChannel(); requestUpstream(); } }); } close(oldChannel); if (oldSubscription != null) { oldSubscription.dispose(); } isRequested.set(false); } /** * When downstream or upstream encounters an error, calculates whether to request another item upstream. * * @param throwable Exception to analyse. 
*/ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) { logger.warning("Retry is already pending. Ignoring transient error.", throwable); return; } final int attemptsMade = retryAttempts.incrementAndGet(); final int attempts; final Duration retryInterval; if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient()) || (throwable instanceof IllegalStateException) || (throwable instanceof RejectedExecutionException)) { attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries()); final Throwable throwableToUse = throwable instanceof AmqpException ? throwable : new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext); retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts); } else { attempts = attemptsMade; retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts); } if (retryInterval != null) { if (isRetryPending.getAndSet(true)) { retryAttempts.decrementAndGet(); return; } logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .addKeyValue(INTERVAL_KEY, retryInterval.toMillis()) .log("Transient error occurred. Retrying.", throwable); retrySubscription = Mono.delay(retryInterval).subscribe(i -> { if (isDisposed()) { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Not requesting from upstream. Processor is disposed."); } else { logger.atInfo() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Requesting from upstream."); requestUpstream(); isRetryPending.set(false); } }); } else { logger.atWarning() .addKeyValue(RETRY_NUMBER_KEY, attempts) .log("Retry attempts exhausted or exception was not retriable.", throwable); lastError = throwable; isDisposed.set(true); dispose(); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("Error in AMQP channel processor. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onError(throwable)); } } } @Override public void onComplete() { logger.info("Upstream connection publisher was completed. Terminating processor."); isDisposed.set(true); synchronized (lock) { final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers; subscribers = new ConcurrentLinkedDeque<>(); logger.info("AMQP channel processor completed. Notifying {} subscribers.", currentSubscribers.size()); currentSubscribers.forEach(subscriber -> subscriber.onComplete()); } } @Override public void subscribe(CoreSubscriber<? super T> actual) { if (isDisposed()) { if (lastError != null) { actual.onSubscribe(Operators.emptySubscription()); actual.onError(lastError); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException("Cannot subscribe. 
Processor is already terminated."))); } return; } final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this); actual.onSubscribe(subscriber); synchronized (lock) { if (currentChannel != null) { subscriber.complete(currentChannel); return; } } subscribers.add(subscriber); logger.atVerbose().addKeyValue("subscribers", subscribers.size()).log("Added a subscriber."); if (!isRetryPending.get()) { requestUpstream(); } } @Override public void dispose() { if (isDisposed.getAndSet(true)) { return; } if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } onComplete(); synchronized (lock) { setAndClearChannel(); } } @Override public boolean isDisposed() { return isDisposed.get(); } private void requestUpstream() { if (currentChannel != null) { logger.verbose("Connection exists, not requesting another."); return; } else if (isDisposed()) { logger.verbose("Is already disposed."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.warning("There is no upstream subscription."); return; } if (!isRequested.getAndSet(true)) { logger.info("Connection not requested, yet. Requesting one."); subscription.request(1); } } private void setAndClearChannel() { T oldChannel; synchronized (lock) { oldChannel = currentChannel; currentChannel = null; } close(oldChannel); } /** * Checks the current state of the channel for this channel and returns true if the channel is null or if this * processor is disposed. * * @return true if the current channel in the processor is null or if the processor is disposed */ public boolean isChannelClosed() { synchronized (lock) { return currentChannel == null || isDisposed(); } } private void close(T channel) { if (channel instanceof AsyncCloseable) { ((AsyncCloseable) channel).closeAsync().subscribe(); } else if (channel instanceof AutoCloseable) { try { ((AutoCloseable) channel).close(); } catch (Exception error) { logger.warning("Error occurred closing AutoCloseable channel.", error); } } else if (channel instanceof Disposable) { try { ((Disposable) channel).dispose(); } catch (Exception error) { logger.warning("Error occurred closing Disposable channel.", error); } } } /** * Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor. * These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor. * The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives * a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified, * which removes itself from the tracking list, then propagates the notification to the wrapped subscriber. */ private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> { private final AmqpChannelProcessor<T> processor; private ChannelSubscriber(CoreSubscriber<? 
super T> actual, AmqpChannelProcessor<T> processor) { super(actual); this.processor = processor; } @Override public void cancel() { processor.subscribers.remove(this); super.cancel(); } @Override public void onComplete() { if (!isCancelled()) { processor.subscribers.remove(this); actual.onComplete(); } } @Override public void onNext(T channel) { if (!isCancelled()) { processor.subscribers.remove(this); super.complete(channel); } } @Override public void onError(Throwable throwable) { if (!isCancelled()) { processor.subscribers.remove(this); actual.onError(throwable); } else { Operators.onErrorDropped(throwable, currentContext()); } } } }
This is a great new check! Though in this case I'd prefer `Objects.equals` since both sides could contain null.
private void checkLoggerInstantiation(DetailAST literalNewToken) { final DetailAST identToken = literalNewToken.findFirstToken(TokenTypes.IDENT); if (identToken == null || !identToken.getText().equals(CLIENT_LOGGER)) { return; } TokenUtil.findFirstTokenByPredicate(literalNewToken.findFirstToken(TokenTypes.ELIST), exprToken -> { if (exprToken.getType() != TokenTypes.EXPR || exprToken.getFirstChild().getType() != TokenTypes.DOT) { return false; } final String containerClassName = FullIdent.createFullIdent(exprToken.getFirstChild()).getText(); final String className = classNameDeque.peek(); if (!(className + ".class").equals(containerClassName)) { log(exprToken, String.format(LOGGER_NAME_MISMATCH_ERROR, className, containerClassName)); } return true; }); }
if (!(className + ".class").equals(containerClassName)) {
private void checkLoggerInstantiation(DetailAST literalNewToken) { final DetailAST identToken = literalNewToken.findFirstToken(TokenTypes.IDENT); if (identToken == null || !identToken.getText().equals(CLIENT_LOGGER)) { return; } TokenUtil.findFirstTokenByPredicate(literalNewToken.findFirstToken(TokenTypes.ELIST), exprToken -> { if (exprToken.getType() != TokenTypes.EXPR || exprToken.getFirstChild().getType() != TokenTypes.DOT) { return false; } final String containerClassName = FullIdent.createFullIdent(exprToken.getFirstChild()).getText(); final String className = classNameDeque.peek(); if (!Objects.equals(className + ".class", containerClassName)) { log(exprToken, String.format(LOGGER_NAME_MISMATCH_ERROR, className, containerClassName)); } return true; }); }
class name for the same class. * * @param literalNewToken LITERAL_NEW node */
class name for the same class. * * @param literalNewToken LITERAL_NEW node */
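A minimal, self-contained illustration of the reviewer's point about `Objects.equals` (the values below are made up, not taken from the checkstyle rule itself): it is null-safe on both sides, whereas invoking `equals` on a null receiver throws a NullPointerException.

import java.util.Objects;

public class ObjectsEqualsDemo {
    public static void main(String[] args) {
        String a = null;
        String b = "Foo.class";

        // a.equals(b) would throw a NullPointerException because the receiver is null.
        // Objects.equals is null-safe on both sides:
        System.out.println(Objects.equals(a, b));       // false
        System.out.println(Objects.equals(b, null));    // false
        System.out.println(Objects.equals(null, null)); // true
    }
}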
nit: Do you think it would be beneficial to introduce a boolean field that represents whether this has been set (which can be determined in the constructor), mostly to aid readability (e.g. `if (globalContextIsEmpty) { ... }`), but also to avoid the repeated isEmpty calls?
public void info(String message) { if (logger.isInfoEnabled()) { if (globalContextSerialized.isEmpty()) { logger.info(removeNewLinesFromLogMessage(message)); } else { atInfo().log(message); } } }
if (globalContextSerialized.isEmpty()) {
public void info(String message) { if (logger.isInfoEnabled()) { if (hasGlobalContext) { atInfo().log(message); } else { logger.info(removeNewLinesFromLogMessage(message)); } } }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
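A minimal sketch of the boolean-field pattern suggested in the comment above; the class and field names here are illustrative stand-ins, not the actual ClientLogger internals. The emptiness check is computed once in the constructor, so the hot logging path only reads a flag.

public final class ContextAwareLogger {
    private final String globalContextSerialized;
    private final boolean hasGlobalContext; // computed once, read on every log call

    public ContextAwareLogger(String globalContextSerialized) {
        this.globalContextSerialized = globalContextSerialized == null ? "" : globalContextSerialized;
        this.hasGlobalContext = !this.globalContextSerialized.isEmpty();
    }

    public void info(String message) {
        if (hasGlobalContext) {
            System.out.println("{" + globalContextSerialized + "} " + message);
        } else {
            System.out.println(message);
        }
    }
}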
Is this eagerly getting the message, even in cases when `canLogAtLevel(logLevel)` returns false? Should you be calling `.log(messageSupplier, throwable)` instead?
private void performDeferredLogging(LogLevel logLevel, Supplier<String> messageSupplier, Throwable throwable) { if (!globalContextSerialized.isEmpty()) { LoggingEventBuilder.create(logger, logLevel, globalContextSerialized, canLogAtLevel(logLevel)) .log(messageSupplier.get(), throwable); return; } String message = removeNewLinesFromLogMessage(messageSupplier.get()); String throwableMessage = (throwable != null) ? throwable.getMessage() : ""; switch (logLevel) { case VERBOSE: if (throwable != null) { logger.debug(message, throwable); } else { logger.debug(message); } break; case INFORMATIONAL: logger.info(message); break; case WARNING: if (!CoreUtils.isNullOrEmpty(throwableMessage)) { message += System.lineSeparator() + throwableMessage; } logger.warn(message); break; case ERROR: if (!CoreUtils.isNullOrEmpty(throwableMessage)) { message += System.lineSeparator() + throwableMessage; } logger.error(message); break; default: break; } }
.log(messageSupplier.get(), throwable);
private void performDeferredLogging(LogLevel logLevel, Supplier<String> messageSupplier, Throwable throwable) { if (hasGlobalContext) { LoggingEventBuilder.create(logger, logLevel, globalContextSerialized, true) .log(messageSupplier, throwable); return; } String message = removeNewLinesFromLogMessage(messageSupplier.get()); String throwableMessage = (throwable != null) ? throwable.getMessage() : ""; switch (logLevel) { case VERBOSE: if (throwable != null) { logger.debug(message, throwable); } else { logger.debug(message); } break; case INFORMATIONAL: logger.info(message); break; case WARNING: if (!CoreUtils.isNullOrEmpty(throwableMessage)) { message += System.lineSeparator() + throwableMessage; } logger.warn(message); break; case ERROR: if (!CoreUtils.isNullOrEmpty(throwableMessage)) { message += System.lineSeparator() + throwableMessage; } logger.error(message); break; default: break; } }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
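A small sketch of why handing the `Supplier` through to the logging call (instead of calling `get()` eagerly) matters; the demo logger below is hypothetical, but it shows that the message is only built when the level is actually enabled.

import java.util.function.Supplier;

public class DeferredLoggingDemo {
    private static final boolean DEBUG_ENABLED = false;

    // Eager: the expensive message is built even though it is never logged.
    static void logEager(String message) {
        if (DEBUG_ENABLED) {
            System.out.println(message);
        }
    }

    // Deferred: the supplier is only invoked once we know the level is enabled.
    static void logDeferred(Supplier<String> messageSupplier) {
        if (DEBUG_ENABLED) {
            System.out.println(messageSupplier.get());
        }
    }

    public static void main(String[] args) {
        logEager(expensiveDump());                        // pays the cost unconditionally
        logDeferred(DeferredLoggingDemo::expensiveDump);  // pays nothing while the level is disabled
    }

    static String expensiveDump() {
        System.out.println("building expensive message...");
        return "state dump";
    }
}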
Since it's optional, should we allow `context` to be `null`?
public ClientLogger(String className, Map<String, Object> context) { Objects.requireNonNull(context, "'context' cannot be null."); Logger initLogger = LoggerFactory.getLogger(className); logger = initLogger instanceof NOPLogger ? new DefaultLogger(className) : initLogger; globalContextSerialized = LoggingEventBuilder.writeJsonFragment(context); hasGlobalContext = !CoreUtils.isNullOrEmpty(globalContextSerialized); }
Objects.requireNonNull(context, "'context' cannot be null.");
public ClientLogger(String className, Map<String, Object> context) { Logger initLogger = LoggerFactory.getLogger(className); logger = initLogger instanceof NOPLogger ? new DefaultLogger(className) : initLogger; globalContextSerialized = LoggingEventBuilder.writeJsonFragment(context); hasGlobalContext = !CoreUtils.isNullOrEmpty(globalContextSerialized); }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
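A generic sketch of the null-tolerant treatment of an optional parameter suggested above (an illustrative class, not the real ClientLogger constructor): a null context is treated the same as an empty one rather than rejected.

import java.util.Collections;
import java.util.Map;

public class OptionalParameterDemo {
    private final Map<String, Object> context;

    public OptionalParameterDemo(Map<String, Object> context) {
        // Treat null the same as "no context" instead of throwing.
        this.context = context == null ? Collections.emptyMap() : context;
    }

    public boolean hasContext() {
        return !context.isEmpty();
    }
}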
This hides a potential exception thrown in the map block. You have to either remove the try-catch block or use flatMap instead of map.
public Mono<Void> runAsync() { return pipeline.send(request) .map(response -> { String date = response.getRequest().getHeaders().getValue("x-ms-date"); String signature = response.getRequest().getHeaders().getValue("Authorization"); try { checkSignatureCorrectness(date, signature); } catch (Exception e) { return Mono.error(e); } return Mono.empty(); }).then(); }
}).then();
public Mono<Void> runAsync() { return pipeline.send(request) .flatMap(response -> { String date = response.getRequest().getHeaders().getValue("x-ms-date"); String signature = response.getRequest().getHeaders().getValue("Authorization"); try { checkSignatureCorrectness(date, signature); } catch (Exception e) { return Mono.error(e); } return Mono.empty(); }).then(); }
class NoOpHttpClient implements HttpClient { @Override public Mono<HttpResponse> send(HttpRequest request) { HttpResponse response = new MockHttpResponse(request, 200); return Mono.just(response); } }
class NoOpHttpClient implements HttpClient { @Override public Mono<HttpResponse> send(HttpRequest request) { HttpResponse response = new MockHttpResponse(request, 200); return Mono.just(response); } }
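A compact sketch of the `map` vs `flatMap` point using plain Reactor types (the string payloads are placeholders): returning `Mono.error(...)` from `map` wraps the error in a value that is never subscribed to, while `flatMap` flattens it so the error actually reaches the downstream subscriber.

import reactor.core.publisher.Mono;

public class MapVsFlatMapDemo {
    public static void main(String[] args) {
        // map: the inner Mono.error is treated as a plain value, so no error signal is emitted.
        Mono.just("response")
            .map(r -> Mono.error(new IllegalStateException("bad signature")))
            .then()
            .subscribe(v -> { },
                e -> System.out.println("map error: " + e),
                () -> System.out.println("map completed without error"));

        // flatMap: the inner Mono is subscribed to, so the error propagates downstream.
        Mono.just("response")
            .flatMap(r -> Mono.error(new IllegalStateException("bad signature")))
            .then()
            .subscribe(v -> { },
                e -> System.out.println("flatMap error: " + e),
                () -> System.out.println("flatMap completed without error"));
    }
}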
The value should follow the same pattern; you can use `custom-managed-identity-client-id`.
void azureManagedIdentityClientIdFromUserConfig() { Properties properties = new Properties(); properties.put(PROPERTY_AZURE_CLIENT_ID, "client-id-from-env"); properties.put(AzureGlobalProperties.PREFIX + ".credential.managed-identity-client-id", "custom managed identity clientId"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); AzureGlobalProperties globalProperties = Binder.get(environment) .bind(AzureGlobalProperties.PREFIX, AzureGlobalProperties.class).get(); assertEquals("client-id-from-env", globalProperties.getCredential().getClientId()); assertEquals("custom managed identity clientId", globalProperties.getCredential().getManagedIdentityClientId()); assertEquals(null, globalProperties.getCredential().getUsername()); }
properties.put(AzureGlobalProperties.PREFIX + ".credential.managed-identity-client-id",
void azureManagedIdentityClientIdFromUserConfig() { Properties properties = new Properties(); properties.put(PROPERTY_AZURE_CLIENT_ID, "client-id-from-env"); properties.put(AzureGlobalProperties.PREFIX + ".credential.managed-identity-client-id", "custom-managed-identity-clientid"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); AzureGlobalProperties globalProperties = Binder.get(environment) .bind(AzureGlobalProperties.PREFIX, AzureGlobalProperties.class).get(); assertEquals("client-id-from-env", globalProperties.getCredential().getClientId()); assertEquals("custom-managed-identity-clientid", globalProperties.getCredential().getManagedIdentityClientId()); assertEquals(null, globalProperties.getCredential().getUsername()); }
class AzureGlobalConfigurationEnvironmentPostProcessorTest { @Test void springPropertyShouldHaveValueIfAzureCoreEnvSet() { PropertiesPropertySource propertiesPropertySource = buildTestProperties(PROPERTY_AZURE_CLIENT_ID, "test-client-id"); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("test-client-id", environment.getProperty(PROPERTY_AZURE_CLIENT_ID)); assertEquals("test-client-id", environment.getProperty("spring.cloud.azure.credential.client-id")); } @Test void springPropertyShouldHaveValueIfAzureKeyVaultEnvSet() { PropertiesPropertySource propertiesPropertySource = buildTestProperties("AZURE_KEYVAULT_ENDPOINT", "test-endpoint"); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("test-endpoint", environment.getProperty("AZURE_KEYVAULT_ENDPOINT")); assertEquals("test-endpoint", environment.getProperty("spring.cloud.azure.keyvault.secret.endpoint")); assertEquals("test-endpoint", environment.getProperty("spring.cloud.azure.keyvault.certificate.endpoint")); } @Test void springPropertyShouldHaveValueIfAzureEventHubsEnvSet() { PropertiesPropertySource propertiesPropertySource = buildTestProperties("AZURE_EVENT_HUBS_CONNECTION_STRING", "test-connection-string"); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("test-connection-string", environment.getProperty("AZURE_EVENT_HUBS_CONNECTION_STRING")); assertEquals("test-connection-string", environment.getProperty("spring.cloud.azure.eventhubs.connection-string")); } @Test void azureCoreEnvShouldNotBeTakenIfSpringPropertiesSet() { Properties properties = new Properties(); properties.put(PROPERTY_AZURE_CLIENT_ID, "core-client-id"); properties.put("spring.cloud.azure.credential.client-id", "spring-client-id"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("core-client-id", environment.getProperty(PROPERTY_AZURE_CLIENT_ID)); assertEquals("spring-client-id", environment.getProperty("spring.cloud.azure.credential.client-id")); } @Test void azureSdkEnvShouldNotBeTakenIfSpringPropertiesSet() { Properties properties = new Properties(); properties.put("AZURE_KEYVAULT_ENDPOINT", "sdk-endpoint"); properties.put("spring.cloud.azure.keyvault.secret.endpoint", "spring-endpoint"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("sdk-endpoint", environment.getProperty("AZURE_KEYVAULT_ENDPOINT")); assertEquals("spring-endpoint", environment.getProperty("spring.cloud.azure.keyvault.secret.endpoint")); } @Test void azureCoreEnvShouldBindCorrect() { Properties properties = new Properties(); properties.put(PROPERTY_AZURE_CLIENT_ID, "core-client-id"); properties.put(PROPERTY_AZURE_CLIENT_SECRET, "core-client-secret"); properties.put(PROPERTY_AZURE_CLIENT_CERTIFICATE_PATH, "core-client-cert-path"); properties.put(PROPERTY_AZURE_USERNAME, "core-username"); properties.put(PROPERTY_AZURE_PASSWORD, "core-password"); properties.put(PROPERTY_AZURE_TENANT_ID, "core-tenant-id"); properties.put(PROPERTY_AZURE_SUBSCRIPTION_ID, "core-sub-id"); properties.put(PROPERTY_AZURE_CLOUD, "other"); properties.put(PROPERTY_AZURE_AUTHORITY_HOST, "aad"); properties.put(PROPERTY_AZURE_REQUEST_RETRY_COUNT, 3); 
properties.put(PROPERTY_AZURE_HTTP_LOG_DETAIL_LEVEL, "headers"); properties.put(PROPERTY_AZURE_REQUEST_CONNECT_TIMEOUT, 1000); properties.put(PROPERTY_AZURE_REQUEST_READ_TIMEOUT, 2000); properties.put(PROPERTY_AZURE_REQUEST_WRITE_TIMEOUT, 3000); properties.put(PROPERTY_AZURE_REQUEST_RESPONSE_TIMEOUT, 4000); properties.put(PROPERTY_NO_PROXY, "localhost"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); AzureGlobalProperties globalProperties = Binder.get(environment).bind(AzureGlobalProperties.PREFIX, AzureGlobalProperties.class).get(); assertEquals("core-client-id", globalProperties.getCredential().getClientId()); assertEquals("core-client-secret", globalProperties.getCredential().getClientSecret()); assertEquals("core-client-cert-path", globalProperties.getCredential().getClientCertificatePath()); assertEquals("core-username", globalProperties.getCredential().getUsername()); assertEquals("core-password", globalProperties.getCredential().getPassword()); assertEquals("core-tenant-id", globalProperties.getProfile().getTenantId()); assertEquals("core-sub-id", globalProperties.getProfile().getSubscriptionId()); assertEquals(AzureProfileAware.CloudType.OTHER, globalProperties.getProfile().getCloud()); assertEquals("aad", globalProperties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); assertEquals(3, globalProperties.getRetry().getMaxAttempts()); assertEquals(HttpLogDetailLevel.HEADERS, globalProperties.getClient().getHttp().getLogging().getLevel()); assertEquals(Duration.ofSeconds(1), globalProperties.getClient().getHttp().getConnectTimeout()); assertEquals(Duration.ofSeconds(2), globalProperties.getClient().getHttp().getReadTimeout()); assertEquals(Duration.ofSeconds(3), globalProperties.getClient().getHttp().getWriteTimeout()); assertEquals(Duration.ofSeconds(4), globalProperties.getClient().getHttp().getResponseTimeout()); assertEquals("localhost", globalProperties.getProxy().getHttp().getNonProxyHosts()); } @Test void azureManagedIdentityClientIdFromEnv() { Properties properties = new Properties(); properties.put(PROPERTY_AZURE_CLIENT_ID, "client-id-from-env"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); AzureGlobalProperties globalProperties = Binder.get(environment) .bind(AzureGlobalProperties.PREFIX, AzureGlobalProperties.class).get(); assertEquals("client-id-from-env", globalProperties.getCredential().getClientId()); assertEquals("client-id-from-env", globalProperties.getCredential().getManagedIdentityClientId()); assertEquals(null, globalProperties.getCredential().getUsername()); } @Test @Test void azureSdkEnvShouldBindCorrect() { Properties properties = new Properties(); properties.put("AZURE_KEYVAULT_ENDPOINT", "test-endpoint"); properties.put("AZURE_EVENT_HUBS_CONNECTION_STRING", "test-connection-str"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); AzureEventHubsProperties eventHubsProperties = Binder.get(environment).bind(AzureEventHubsProperties.PREFIX, AzureEventHubsProperties.class).get(); AzureKeyVaultSecretProperties keyVaultSecretProperties = Binder.get(environment).bind(AzureKeyVaultSecretProperties.PREFIX, 
AzureKeyVaultSecretProperties.class).get(); assertEquals("test-connection-str", eventHubsProperties.getConnectionString()); assertEquals("test-endpoint", keyVaultSecretProperties.getEndpoint()); } private PropertiesPropertySource buildTestProperties(String key, String value) { Properties properties = new Properties(); properties.put(key, value); return new PropertiesPropertySource("test-properties", properties); } private ConfigurableEnvironment getEnvironment(PropertiesPropertySource propertiesPropertySource) { return getEnvironment(propertiesPropertySource, null); } private ConfigurableEnvironment getEnvironment(PropertiesPropertySource propertiesPropertySource, EnvironmentPostProcessor environmentPostProcessor) { ConfigurableEnvironment environment = new StandardServletEnvironment(); if (propertiesPropertySource != null) { environment.getPropertySources().addFirst(propertiesPropertySource); } if (environmentPostProcessor == null) { environmentPostProcessor = new AzureGlobalConfigurationEnvironmentPostProcessor(new DeferredLog()); } environmentPostProcessor.postProcessEnvironment(environment, null); return environment; } }
class AzureGlobalConfigurationEnvironmentPostProcessorTest { @Test void springPropertyShouldHaveValueIfAzureCoreEnvSet() { PropertiesPropertySource propertiesPropertySource = buildTestProperties(PROPERTY_AZURE_CLIENT_ID, "test-client-id"); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("test-client-id", environment.getProperty(PROPERTY_AZURE_CLIENT_ID)); assertEquals("test-client-id", environment.getProperty("spring.cloud.azure.credential.client-id")); } @Test void springPropertyShouldHaveValueIfAzureKeyVaultEnvSet() { PropertiesPropertySource propertiesPropertySource = buildTestProperties("AZURE_KEYVAULT_ENDPOINT", "test-endpoint"); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("test-endpoint", environment.getProperty("AZURE_KEYVAULT_ENDPOINT")); assertEquals("test-endpoint", environment.getProperty("spring.cloud.azure.keyvault.secret.endpoint")); assertEquals("test-endpoint", environment.getProperty("spring.cloud.azure.keyvault.certificate.endpoint")); } @Test void springPropertyShouldHaveValueIfAzureEventHubsEnvSet() { PropertiesPropertySource propertiesPropertySource = buildTestProperties("AZURE_EVENT_HUBS_CONNECTION_STRING", "test-connection-string"); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("test-connection-string", environment.getProperty("AZURE_EVENT_HUBS_CONNECTION_STRING")); assertEquals("test-connection-string", environment.getProperty("spring.cloud.azure.eventhubs.connection-string")); } @Test void azureCoreEnvShouldNotBeTakenIfSpringPropertiesSet() { Properties properties = new Properties(); properties.put(PROPERTY_AZURE_CLIENT_ID, "core-client-id"); properties.put("spring.cloud.azure.credential.client-id", "spring-client-id"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("core-client-id", environment.getProperty(PROPERTY_AZURE_CLIENT_ID)); assertEquals("spring-client-id", environment.getProperty("spring.cloud.azure.credential.client-id")); } @Test void azureSdkEnvShouldNotBeTakenIfSpringPropertiesSet() { Properties properties = new Properties(); properties.put("AZURE_KEYVAULT_ENDPOINT", "sdk-endpoint"); properties.put("spring.cloud.azure.keyvault.secret.endpoint", "spring-endpoint"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); assertEquals("sdk-endpoint", environment.getProperty("AZURE_KEYVAULT_ENDPOINT")); assertEquals("spring-endpoint", environment.getProperty("spring.cloud.azure.keyvault.secret.endpoint")); } @Test void azureCoreEnvShouldBindCorrect() { Properties properties = new Properties(); properties.put(PROPERTY_AZURE_CLIENT_ID, "core-client-id"); properties.put(PROPERTY_AZURE_CLIENT_SECRET, "core-client-secret"); properties.put(PROPERTY_AZURE_CLIENT_CERTIFICATE_PATH, "core-client-cert-path"); properties.put(PROPERTY_AZURE_USERNAME, "core-username"); properties.put(PROPERTY_AZURE_PASSWORD, "core-password"); properties.put(PROPERTY_AZURE_TENANT_ID, "core-tenant-id"); properties.put(PROPERTY_AZURE_SUBSCRIPTION_ID, "core-sub-id"); properties.put(PROPERTY_AZURE_CLOUD, "other"); properties.put(PROPERTY_AZURE_AUTHORITY_HOST, "aad"); properties.put(PROPERTY_AZURE_REQUEST_RETRY_COUNT, 3); 
properties.put(PROPERTY_AZURE_HTTP_LOG_DETAIL_LEVEL, "headers"); properties.put(PROPERTY_AZURE_REQUEST_CONNECT_TIMEOUT, 1000); properties.put(PROPERTY_AZURE_REQUEST_READ_TIMEOUT, 2000); properties.put(PROPERTY_AZURE_REQUEST_WRITE_TIMEOUT, 3000); properties.put(PROPERTY_AZURE_REQUEST_RESPONSE_TIMEOUT, 4000); properties.put(PROPERTY_NO_PROXY, "localhost"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); AzureGlobalProperties globalProperties = Binder.get(environment).bind(AzureGlobalProperties.PREFIX, AzureGlobalProperties.class).get(); assertEquals("core-client-id", globalProperties.getCredential().getClientId()); assertEquals("core-client-secret", globalProperties.getCredential().getClientSecret()); assertEquals("core-client-cert-path", globalProperties.getCredential().getClientCertificatePath()); assertEquals("core-username", globalProperties.getCredential().getUsername()); assertEquals("core-password", globalProperties.getCredential().getPassword()); assertEquals("core-tenant-id", globalProperties.getProfile().getTenantId()); assertEquals("core-sub-id", globalProperties.getProfile().getSubscriptionId()); assertEquals(AzureProfileAware.CloudType.OTHER, globalProperties.getProfile().getCloud()); assertEquals("aad", globalProperties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); assertEquals(3, globalProperties.getRetry().getMaxAttempts()); assertEquals(HttpLogDetailLevel.HEADERS, globalProperties.getClient().getHttp().getLogging().getLevel()); assertEquals(Duration.ofSeconds(1), globalProperties.getClient().getHttp().getConnectTimeout()); assertEquals(Duration.ofSeconds(2), globalProperties.getClient().getHttp().getReadTimeout()); assertEquals(Duration.ofSeconds(3), globalProperties.getClient().getHttp().getWriteTimeout()); assertEquals(Duration.ofSeconds(4), globalProperties.getClient().getHttp().getResponseTimeout()); assertEquals("localhost", globalProperties.getProxy().getHttp().getNonProxyHosts()); } @Test void azureManagedIdentityClientIdFromEnv() { Properties properties = new Properties(); properties.put(PROPERTY_AZURE_CLIENT_ID, "client-id-from-env"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); AzureGlobalProperties globalProperties = Binder.get(environment) .bind(AzureGlobalProperties.PREFIX, AzureGlobalProperties.class).get(); assertEquals("client-id-from-env", globalProperties.getCredential().getClientId()); assertEquals("client-id-from-env", globalProperties.getCredential().getManagedIdentityClientId()); assertEquals(null, globalProperties.getCredential().getUsername()); } @Test @Test void azureSdkEnvShouldBindCorrect() { Properties properties = new Properties(); properties.put("AZURE_KEYVAULT_ENDPOINT", "test-endpoint"); properties.put("AZURE_EVENT_HUBS_CONNECTION_STRING", "test-connection-str"); PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource("test-properties", properties); ConfigurableEnvironment environment = getEnvironment(propertiesPropertySource); AzureEventHubsProperties eventHubsProperties = Binder.get(environment).bind(AzureEventHubsProperties.PREFIX, AzureEventHubsProperties.class).get(); AzureKeyVaultSecretProperties keyVaultSecretProperties = Binder.get(environment).bind(AzureKeyVaultSecretProperties.PREFIX, 
AzureKeyVaultSecretProperties.class).get(); assertEquals("test-connection-str", eventHubsProperties.getConnectionString()); assertEquals("test-endpoint", keyVaultSecretProperties.getEndpoint()); } private PropertiesPropertySource buildTestProperties(String key, String value) { Properties properties = new Properties(); properties.put(key, value); return new PropertiesPropertySource("test-properties", properties); } private ConfigurableEnvironment getEnvironment(PropertiesPropertySource propertiesPropertySource) { return getEnvironment(propertiesPropertySource, null); } private ConfigurableEnvironment getEnvironment(PropertiesPropertySource propertiesPropertySource, EnvironmentPostProcessor environmentPostProcessor) { ConfigurableEnvironment environment = new StandardServletEnvironment(); if (propertiesPropertySource != null) { environment.getPropertySources().addFirst(propertiesPropertySource); } if (environmentPostProcessor == null) { environmentPostProcessor = new AzureGlobalConfigurationEnvironmentPostProcessor(new DeferredLog()); } environmentPostProcessor.postProcessEnvironment(environment, null); return environment; } }
This is not correct: if we return `Mono.empty()`, the subscriber of this Mono won't be able to perform actions upon the signal.
private Mono<Void> doSend(String destination, List<EventData> events, PartitionSupplier partitionSupplier) { EventHubProducerAsyncClient producer = producerFactory.createProducer(destination); CreateBatchOptions options = buildCreateBatchOptions(partitionSupplier); AtomicReference<EventDataBatch> currentBatch = new AtomicReference<>( producer.createBatch(options).block()); Flux.fromIterable(events).flatMap(event -> { final EventDataBatch batch = currentBatch.get(); if (batch.tryAdd(event)) { return Mono.empty(); } return Mono.when( producer.send(batch), producer.createBatch(options).map(newBatch -> { currentBatch.set(newBatch); if (!newBatch.tryAdd(event)) { throw Exceptions.propagate(new IllegalArgumentException( "Event was too large to fit in an empty batch. Max size: " + newBatch.getMaxSizeInBytes())); } return newBatch; })); }).then() .doFinally(signal -> { final EventDataBatch batch = currentBatch.getAndSet(null); if (batch != null && batch.getCount() > 0) { producer.send(batch).block(); } }).subscribe(); return Mono.empty(); }
return Mono.empty();
private Mono<Void> doSend(String destination, List<EventData> events, PartitionSupplier partitionSupplier) { EventHubProducerAsyncClient producer = producerFactory.createProducer(destination); CreateBatchOptions options = buildCreateBatchOptions(partitionSupplier); AtomicReference<EventDataBatch> currentBatch = new AtomicReference<>( producer.createBatch(options).block()); Flux.fromIterable(events).flatMap(event -> { final EventDataBatch batch = currentBatch.get(); try { if (batch.tryAdd(event)) { return Mono.empty(); } else { LOGGER.warn("EventDataBatch is full in the collect process or the first event is " + "too large to fit in an empty batch! Max size: {}", batch.getMaxSizeInBytes()); } } catch (AmqpException e) { LOGGER.error("Event is larger than maximum allowed size.", e); return Mono.empty(); } return Mono.when( producer.send(batch), producer.createBatch(options).map(newBatch -> { currentBatch.set(newBatch); try { if (!newBatch.tryAdd(event)) { LOGGER.error( "Event was too large to fit in an empty batch. Max size:{} ", newBatch.getMaxSizeInBytes()); } } catch (AmqpException e) { LOGGER.error("Event was too large to fit in an empty batch. Max size:{}", newBatch.getMaxSizeInBytes(), e); } return newBatch; })); }) .then() .block(); final EventDataBatch batch = currentBatch.getAndSet(null); return producer.send(batch); }
class EventHubsTemplate implements SendOperation, BatchSendOperation { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsTemplate.class); private final EventHubsProducerFactory producerFactory; private EventHubsMessageConverter messageConverter = new EventHubsMessageConverter(); public EventHubsTemplate(EventHubsProducerFactory producerFactory) { this.producerFactory = producerFactory; } @Override public <T> Mono<Void> sendAsync(String destination, Collection<Message<T>> messages, PartitionSupplier partitionSupplier) { List<EventData> eventData = messages.stream() .map(m -> messageConverter.fromMessage(m, EventData.class)) .collect(Collectors.toList()); return doSend(destination, eventData, partitionSupplier); } @Override public <T> Mono<Void> sendAsync(String destination, Message<T> message, PartitionSupplier partitionSupplier) { return sendAsync(destination, Collections.singleton(message), partitionSupplier); } CreateBatchOptions buildCreateBatchOptions(PartitionSupplier partitionSupplier) { return new CreateBatchOptions() .setPartitionId(partitionSupplier != null ? partitionSupplier.getPartitionId() : null) .setPartitionKey(partitionSupplier != null ? partitionSupplier.getPartitionKey() : null); } public void setMessageConverter(EventHubsMessageConverter messageConverter) { this.messageConverter = messageConverter; } }
class EventHubsTemplate implements SendOperation, BatchSendOperation { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsTemplate.class); private final EventHubsProducerFactory producerFactory; private EventHubsMessageConverter messageConverter = new EventHubsMessageConverter(); public EventHubsTemplate(EventHubsProducerFactory producerFactory) { this.producerFactory = producerFactory; } @Override public <T> Mono<Void> sendAsync(String destination, Collection<Message<T>> messages, PartitionSupplier partitionSupplier) { List<EventData> eventData = messages.stream() .map(m -> messageConverter.fromMessage(m, EventData.class)) .collect(Collectors.toList()); return doSend(destination, eventData, partitionSupplier); } @Override public <T> Mono<Void> sendAsync(String destination, Message<T> message, PartitionSupplier partitionSupplier) { return sendAsync(destination, Collections.singleton(message), partitionSupplier); } private CreateBatchOptions buildCreateBatchOptions(PartitionSupplier partitionSupplier) { return new CreateBatchOptions() .setPartitionId(partitionSupplier != null ? partitionSupplier.getPartitionId() : null) .setPartitionKey(partitionSupplier != null ? partitionSupplier.getPartitionKey() : null); } public void setMessageConverter(EventHubsMessageConverter messageConverter) { this.messageConverter = messageConverter; } }
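A stripped-down sketch of the point above (types and names simplified, not the actual EventHubsTemplate): subscribing internally and returning `Mono.empty()` detaches the caller from the real send, so its completion or failure is invisible; returning the composed Mono lets the caller observe the signal.

import reactor.core.publisher.Mono;

public class ReturnTheRealMonoDemo {

    // Anti-pattern: the work is kicked off in the background and the caller always
    // receives an immediately-complete Mono, even if the send fails.
    static Mono<Void> sendDetached(Mono<Void> send) {
        // The error only reaches this internal subscriber; the caller never sees it.
        send.subscribe(v -> { }, e -> System.out.println("internal log only: " + e));
        return Mono.empty();
    }

    // Preferred: hand the caller the actual pipeline so errors and completion propagate
    // to whoever subscribes.
    static Mono<Void> sendAttached(Mono<Void> send) {
        return send;
    }

    public static void main(String[] args) {
        Mono<Void> failingSend = Mono.error(new RuntimeException("send failed"));

        sendDetached(failingSend).subscribe(
            v -> { },
            e -> System.out.println("detached error: " + e),
            () -> System.out.println("detached completed (error was swallowed)"));

        sendAttached(failingSend).subscribe(
            v -> { },
            e -> System.out.println("attached error: " + e),
            () -> System.out.println("attached completed"));
    }
}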
Are we changing this for the test?
private Mono<Void> doSend(String destination, List<EventData> events, PartitionSupplier partitionSupplier) { EventHubProducerAsyncClient producer = producerFactory.createProducer(destination); CreateBatchOptions options = buildCreateBatchOptions(partitionSupplier); AtomicReference<EventDataBatch> currentBatch = new AtomicReference<>( producer.createBatch(options).block()); Flux.fromIterable(events).flatMap(event -> { final EventDataBatch batch = currentBatch.get(); if (batch.tryAdd(event)) { return Mono.empty(); } return Mono.when( producer.send(batch), producer.createBatch(options).map(newBatch -> { currentBatch.set(newBatch); if (!newBatch.tryAdd(event)) { throw Exceptions.propagate(new IllegalArgumentException( "Event was too large to fit in an empty batch. Max size: " + newBatch.getMaxSizeInBytes())); } return newBatch; })); }).then() .doFinally(signal -> { final EventDataBatch batch = currentBatch.getAndSet(null); if (batch != null && batch.getCount() > 0) { producer.send(batch).block(); } }).subscribe(); return Mono.empty(); }
return Mono.when(
private Mono<Void> doSend(String destination, List<EventData> events, PartitionSupplier partitionSupplier) { EventHubProducerAsyncClient producer = producerFactory.createProducer(destination); CreateBatchOptions options = buildCreateBatchOptions(partitionSupplier); AtomicReference<EventDataBatch> currentBatch = new AtomicReference<>( producer.createBatch(options).block()); Flux.fromIterable(events).flatMap(event -> { final EventDataBatch batch = currentBatch.get(); try { if (batch.tryAdd(event)) { return Mono.empty(); } else { LOGGER.warn("EventDataBatch is full in the collect process or the first event is " + "too large to fit in an empty batch! Max size: {}", batch.getMaxSizeInBytes()); } } catch (AmqpException e) { LOGGER.error("Event is larger than maximum allowed size.", e); return Mono.empty(); } return Mono.when( producer.send(batch), producer.createBatch(options).map(newBatch -> { currentBatch.set(newBatch); try { if (!newBatch.tryAdd(event)) { LOGGER.error( "Event was too large to fit in an empty batch. Max size:{} ", newBatch.getMaxSizeInBytes()); } } catch (AmqpException e) { LOGGER.error("Event was too large to fit in an empty batch. Max size:{}", newBatch.getMaxSizeInBytes(), e); } return newBatch; })); }) .then() .block(); final EventDataBatch batch = currentBatch.getAndSet(null); return producer.send(batch); }
class EventHubsTemplate implements SendOperation, BatchSendOperation { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsTemplate.class); private final EventHubsProducerFactory producerFactory; private EventHubsMessageConverter messageConverter = new EventHubsMessageConverter(); public EventHubsTemplate(EventHubsProducerFactory producerFactory) { this.producerFactory = producerFactory; } @Override public <T> Mono<Void> sendAsync(String destination, Collection<Message<T>> messages, PartitionSupplier partitionSupplier) { List<EventData> eventData = messages.stream() .map(m -> messageConverter.fromMessage(m, EventData.class)) .collect(Collectors.toList()); return doSend(destination, eventData, partitionSupplier); } @Override public <T> Mono<Void> sendAsync(String destination, Message<T> message, PartitionSupplier partitionSupplier) { return sendAsync(destination, Collections.singleton(message), partitionSupplier); } CreateBatchOptions buildCreateBatchOptions(PartitionSupplier partitionSupplier) { return new CreateBatchOptions() .setPartitionId(partitionSupplier != null ? partitionSupplier.getPartitionId() : null) .setPartitionKey(partitionSupplier != null ? partitionSupplier.getPartitionKey() : null); } public void setMessageConverter(EventHubsMessageConverter messageConverter) { this.messageConverter = messageConverter; } }
class EventHubsTemplate implements SendOperation, BatchSendOperation { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsTemplate.class); private final EventHubsProducerFactory producerFactory; private EventHubsMessageConverter messageConverter = new EventHubsMessageConverter(); public EventHubsTemplate(EventHubsProducerFactory producerFactory) { this.producerFactory = producerFactory; } @Override public <T> Mono<Void> sendAsync(String destination, Collection<Message<T>> messages, PartitionSupplier partitionSupplier) { List<EventData> eventData = messages.stream() .map(m -> messageConverter.fromMessage(m, EventData.class)) .collect(Collectors.toList()); return doSend(destination, eventData, partitionSupplier); } @Override public <T> Mono<Void> sendAsync(String destination, Message<T> message, PartitionSupplier partitionSupplier) { return sendAsync(destination, Collections.singleton(message), partitionSupplier); } private CreateBatchOptions buildCreateBatchOptions(PartitionSupplier partitionSupplier) { return new CreateBatchOptions() .setPartitionId(partitionSupplier != null ? partitionSupplier.getPartitionId() : null) .setPartitionKey(partitionSupplier != null ? partitionSupplier.getPartitionKey() : null); } public void setMessageConverter(EventHubsMessageConverter messageConverter) { this.messageConverter = messageConverter; } }
Can we use https://projectreactor.io/docs/test/release/api/reactor/test/StepVerifier.html here?
void testSendAsyncForMessagesWithTheSecondEventTooLargeForOneNewBatch() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenReturn(true, false, false).thenReturn(true, true, true); when(eventDataBatch.getCount()).thenReturn(5); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); this.eventHubsTemplate.sendAsync(this.destination, messages, null).doOnError(ex -> { System.out.println("do on Error" + ex.getMessage()); ex.printStackTrace(); }).doOnSuccess(t -> { System.out.println("do on success:" + t); }).block(Duration.ofSeconds(10)); verify(this.mockProducerClient, times(2)).send(any(EventDataBatch.class)); }
}).block(Duration.ofSeconds(10));
void testSendAsyncForMessagesWithTheSecondEventTooLargeForOneNewBatch() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenReturn(true, false, false).thenReturn(true, true, true); when(eventDataBatch.getCount()).thenReturn(5); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); Mono<Void> mono = this.eventHubsTemplate.sendAsync(this.destination, messages, null).doOnError(ex -> { System.out.println("do on Error" + ex.getMessage()); ex.printStackTrace(); }).doOnSuccess(t -> { System.out.println("do on success:" + t); }); StepVerifier.create(mono) .verifyComplete(); verify(this.mockProducerClient, times(2)).send(any(EventDataBatch.class)); }
class EventHubsTemplateTest { private EventHubProducerAsyncClient mockProducerClient; protected String destination = "event-hub"; protected Mono<Void> mono = Mono.empty(); private EventHubsTemplate eventHubsTemplate; @BeforeEach public void setUp() { EventHubsProducerFactory producerFactory = mock(EventHubsProducerFactory.class); this.mockProducerClient = mock(EventHubProducerAsyncClient.class); when(producerFactory.createProducer(this.destination)).thenReturn(this.mockProducerClient); this.eventHubsTemplate = spy(new EventHubsTemplate(producerFactory)); when(this.mockProducerClient.send(any(EventDataBatch.class))).thenReturn(this.mono); } /** * test the three batches case */ @Test void testSendAsyncForMessagesWithThreeBatch() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenReturn(true, true, false, true, true, false, true); when(eventDataBatch.getCount()).thenReturn(2, 2, 1); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); this.eventHubsTemplate.sendAsync(this.destination, messages, null).block(); verify(this.mockProducerClient, times(3)).send(any(EventDataBatch.class)); } /** * test the normal one batch case */ @Test void testSendAsyncForMessagesOneBatchAndSendCompletely() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenReturn(true, true, true, true, true); when(eventDataBatch.getCount()).thenReturn(5); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); this.eventHubsTemplate.sendAsync(this.destination, messages, null).doOnSuccess(t -> { System.out.println("do on success:" + t); }).block(); verify(this.mockProducerClient, times(1)).send(any(EventDataBatch.class)); } /** * test the normal one batch case with one exception at the first */ @Test void testSendAsyncForMessagesOneBatchAndSendCompletelyWithException() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenThrow(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", 1024 * 1024 / 1024), null)).thenReturn(true, true, true, true); when(eventDataBatch.getCount()).thenReturn(5); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); this.eventHubsTemplate.sendAsync(this.destination, messages, null).doOnError(ex -> { System.out.println("do on Error"); ex.printStackTrace(); }).doOnSuccess(t -> { System.out.println("do on success:" + t); }).block(Duration.ofSeconds(10)); 
verify(this.mockProducerClient, times(1)).send(any(EventDataBatch.class)); } /** * test the normal two batch case with one exception in the middle */ @Test void testSendAsyncForMessagesTwoBatchAndSendCompletelyWithException() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenReturn(true, false, true). thenThrow(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded " + "maximum message size: %s kb", 1024 * 1024 / 1024), null)).thenReturn(true, true); when(eventDataBatch.getCount()).thenReturn(5); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); this.eventHubsTemplate.sendAsync(this.destination, messages, null).doOnError(ex -> { System.out.println("do on Error"); ex.printStackTrace(); }).doOnSuccess(t -> { System.out.println("do on success:" + t); }).block(Duration.ofSeconds(10)); verify(this.mockProducerClient, times(2)).send(any(EventDataBatch.class)); } /** * test the case that the second event is too large for one new batch */ @Test }
class EventHubsTemplateTest { private EventHubProducerAsyncClient mockProducerClient; protected String destination = "event-hub"; protected Mono<Void> empty = Mono.empty(); private EventHubsTemplate eventHubsTemplate; @BeforeEach public void setUp() { EventHubsProducerFactory producerFactory = mock(EventHubsProducerFactory.class); this.mockProducerClient = mock(EventHubProducerAsyncClient.class); when(producerFactory.createProducer(this.destination)).thenReturn(this.mockProducerClient); this.eventHubsTemplate = spy(new EventHubsTemplate(producerFactory)); when(this.mockProducerClient.send(any(EventDataBatch.class))).thenReturn(this.empty); } /** * test the three batches case */ @Test void testSendAsyncForMessagesWithThreeBatch() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenReturn(true, true, false, true, true, false, true); when(eventDataBatch.getCount()).thenReturn(2, 2, 1); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); Mono<Void> mono = this.eventHubsTemplate.sendAsync(this.destination, messages, null); StepVerifier.create(mono) .verifyComplete(); verify(this.mockProducerClient, times(3)).send(any(EventDataBatch.class)); } /** * test the normal one batch case */ @Test void testSendAsyncForMessagesOneBatchAndSendCompletely() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenReturn(true, true, true, true, true); when(eventDataBatch.getCount()).thenReturn(5); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); Mono<Void> mono = this.eventHubsTemplate.sendAsync(this.destination, messages, null).doOnSuccess(t -> { System.out.println("do on success:" + t); }); StepVerifier.create(mono) .verifyComplete(); verify(this.mockProducerClient, times(1)).send(any(EventDataBatch.class)); } /** * test the normal one batch case with one exception at the first */ @Test void testSendAsyncForMessagesOneBatchAndSendCompletelyWithException() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenThrow(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", 1024 * 1024 / 1024), null)).thenReturn(true, true, true, true); when(eventDataBatch.getCount()).thenReturn(5); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); Mono<Void> mono = this.eventHubsTemplate.sendAsync(this.destination, messages, null).doOnError(ex -> { System.out.println("do on Error"); 
ex.printStackTrace(); }).doOnSuccess(t -> { System.out.println("do on success:" + t); }); StepVerifier.create(mono) .verifyComplete(); verify(this.mockProducerClient, times(1)).send(any(EventDataBatch.class)); } /** * test the normal two batch case with one exception in the middle */ @Test void testSendAsyncForMessagesTwoBatchAndSendCompletelyWithException() { EventDataBatch eventDataBatch = mock(EventDataBatch.class); when(this.mockProducerClient.createBatch(any(CreateBatchOptions.class))) .thenReturn(Mono.just(eventDataBatch)); when(eventDataBatch.tryAdd(any(EventData.class))).thenReturn(true, false, true). thenThrow(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded " + "maximum message size: %s kb", 1024 * 1024 / 1024), null)).thenReturn(true, true); when(eventDataBatch.getCount()).thenReturn(5); List<String> messagesList = new ArrayList<>(); for (int i = 0; i < 5; i++) { messagesList.add("abcde"); } List<Message<String>> messages = messagesList.stream().map((Function<String, GenericMessage<String>>) GenericMessage::new).collect(Collectors.toList()); Mono<Void> mono = this.eventHubsTemplate.sendAsync(this.destination, messages, null).doOnError(ex -> { System.out.println("do on Error"); ex.printStackTrace(); }).doOnSuccess(t -> { System.out.println("do on success:" + t); }); StepVerifier.create(mono) .verifyComplete(); verify(this.mockProducerClient, times(2)).send(any(EventDataBatch.class)); } /** * test the case that the second event is too large for one new batch */ @Test }
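A minimal sketch of the `StepVerifier` style the reviewer links to (the Monos under test here are stand-ins for `sendAsync(...)`): it subscribes, asserts the terminal signal, and supports a bounded wait, replacing the bare `block(Duration.ofSeconds(10))` calls.

import java.time.Duration;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;

public class StepVerifierDemo {
    public static void main(String[] args) {
        Mono<Void> underTest = Mono.empty(); // stand-in for eventHubsTemplate.sendAsync(...)

        // Assert successful completion (same intent as block(), with the assertion built in).
        StepVerifier.create(underTest)
            .verifyComplete();

        // A bounded wait, mirroring block(Duration.ofSeconds(10)) in the original test.
        StepVerifier.create(underTest)
            .expectComplete()
            .verify(Duration.ofSeconds(10));

        // Errors are asserted explicitly rather than surfacing as exceptions from block().
        StepVerifier.create(Mono.error(new IllegalStateException("boom")))
            .verifyError(IllegalStateException.class);
    }
}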
How about extracting this as a method?
protected void configureService(CosmosClientBuilder builder) { PropertyMapper map = new PropertyMapper(); map.from(this.cosmosProperties.getEndpoint()).to(builder::endpoint); map.from(this.cosmosProperties.getConsistencyLevel()).to(builder::consistencyLevel); map.from(this.cosmosProperties.getClientTelemetryEnabled()).to(builder::clientTelemetryEnabled); map.from(this.cosmosProperties.getConnectionSharingAcrossClientsEnabled()).to(builder::connectionSharingAcrossClientsEnabled); map.from(this.cosmosProperties.getContentResponseOnWriteEnabled()).to(builder::contentResponseOnWriteEnabled); map.from(this.cosmosProperties.getEndpointDiscoveryEnabled()).to(builder::endpointDiscoveryEnabled); map.from(this.cosmosProperties.getMultipleWriteRegionsEnabled()).to(builder::multipleWriteRegionsEnabled); map.from(this.cosmosProperties.getReadRequestsFallbackEnabled()).to(builder::readRequestsFallbackEnabled); map.from(this.cosmosProperties.getSessionCapturingOverrideEnabled()).to(builder::sessionCapturingOverrideEnabled); map.from(this.cosmosProperties.getPreferredRegions()).whenNot(List::isEmpty).to(builder::preferredRegions); ThrottlingRetryOptions retryOptions = this.cosmosProperties.getThrottlingRetryOptions(); if (this.throttlingRetryOptions != null && isDefaultThrottlingRetryOptions(retryOptions)) { map.from(this.throttlingRetryOptions).to(builder::throttlingRetryOptions); LOGGER.debug("The throttling retry options is not configured, " + "then the Azure Spring Retry configuration will be applied to Cosmos service builder."); } else { map.from(retryOptions).to(builder::throttlingRetryOptions); } map.from(this.cosmosProperties.getResourceToken()).to(builder::resourceToken); map.from(this.cosmosProperties.getPermissions()).whenNot(List::isEmpty).to(builder::permissions); GatewayConnectionConfig gatewayConnection = this.cosmosProperties.getGatewayConnection(); if (proxyOptions != null && gatewayConnection.getProxy() == null) { gatewayConnection.setProxy(proxyOptions); LOGGER.debug("The proxy of the Gateway connection is not configured, " + "then the Azure Spring Proxy configuration will be applied to Cosmos gateway connection."); } if (ConnectionMode.DIRECT.equals(this.cosmosProperties.getConnectionMode())) { builder.directMode(this.cosmosProperties.getDirectConnection(), gatewayConnection); } else if (ConnectionMode.GATEWAY.equals(this.cosmosProperties.getConnectionMode())) { builder.gatewayMode(gatewayConnection); } }
}
protected void configureService(CosmosClientBuilder builder) { PropertyMapper map = new PropertyMapper(); map.from(this.cosmosProperties.getEndpoint()).to(builder::endpoint); map.from(this.cosmosProperties.getConsistencyLevel()).to(builder::consistencyLevel); map.from(this.cosmosProperties.getClientTelemetryEnabled()).to(builder::clientTelemetryEnabled); map.from(this.cosmosProperties.getConnectionSharingAcrossClientsEnabled()).to(builder::connectionSharingAcrossClientsEnabled); map.from(this.cosmosProperties.getContentResponseOnWriteEnabled()).to(builder::contentResponseOnWriteEnabled); map.from(this.cosmosProperties.getEndpointDiscoveryEnabled()).to(builder::endpointDiscoveryEnabled); map.from(this.cosmosProperties.getMultipleWriteRegionsEnabled()).to(builder::multipleWriteRegionsEnabled); map.from(this.cosmosProperties.getReadRequestsFallbackEnabled()).to(builder::readRequestsFallbackEnabled); map.from(this.cosmosProperties.getSessionCapturingOverrideEnabled()).to(builder::sessionCapturingOverrideEnabled); map.from(this.cosmosProperties.getPreferredRegions()).whenNot(List::isEmpty).to(builder::preferredRegions); configureThrottlingRetryOptions(builder, map); configureConnection(builder, map); }
class CosmosClientBuilderFactory extends AbstractAzureServiceClientBuilderFactory<CosmosClientBuilder> { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosClientBuilderFactory.class); private final CosmosProperties cosmosProperties; private ProxyOptions proxyOptions; private ThrottlingRetryOptions throttlingRetryOptions; public CosmosClientBuilderFactory(CosmosProperties cosmosProperties) { this.cosmosProperties = cosmosProperties; } @Override protected CosmosClientBuilder createBuilderInstance() { return new CosmosClientBuilder(); } @Override protected AzureProperties getAzureProperties() { return this.cosmosProperties; } @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(CosmosClientBuilder builder) { return Arrays.asList( new KeyAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())), new TokenAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())) ); } @Override protected void configureProxy(CosmosClientBuilder builder) { ProxyAware.Proxy proxy = this.cosmosProperties.getProxy(); this.proxyOptions = HTTP_PROXY_CONVERTER.convert(proxy); if (this.proxyOptions == null) { LOGGER.debug("No proxy properties available."); } } @Override protected void configureRetry(CosmosClientBuilder builder) { RetryAware.Retry retry = this.cosmosProperties.getRetry(); if (isInvalidRetry(retry)) { return; } this.throttlingRetryOptions = new ThrottlingRetryOptions(); this.throttlingRetryOptions.setMaxRetryWaitTime(retry.getTimeout()); this.throttlingRetryOptions.setMaxRetryAttemptsOnThrottledRequests(retry.getMaxAttempts()); } @Override /** * Check if the retry option is the default value, which is defined in azure-cosmos SDK. * @param retryOptions retry options to be checked * @return result */ private boolean isDefaultThrottlingRetryOptions(ThrottlingRetryOptions retryOptions) { if (retryOptions.getMaxRetryWaitTime().equals(Duration.ofSeconds(30)) && retryOptions.getMaxRetryAttemptsOnThrottledRequests() == 9) { return true; } return false; } /** * Check if the properties of the retry is invalid value. * @param retry retry options to be checked * @return result */ private boolean isInvalidRetry(RetryAware.Retry retry) { if (retry.getMaxAttempts() == null || retry.getTimeout() == null) { return true; } return false; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeApplicationId() { return CosmosClientBuilder::userAgentSuffix; } @Override protected BiConsumer<CosmosClientBuilder, Configuration> consumeConfiguration() { LOGGER.warn("Configuration instance is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } @Override protected BiConsumer<CosmosClientBuilder, TokenCredential> consumeDefaultTokenCredential() { return CosmosClientBuilder::credential; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeConnectionString() { LOGGER.debug("Connection string is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } }
class CosmosClientBuilderFactory extends AbstractAzureServiceClientBuilderFactory<CosmosClientBuilder> { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosClientBuilderFactory.class); private final CosmosProperties cosmosProperties; private ProxyOptions proxyOptions; private ThrottlingRetryOptions throttlingRetryOptions; public CosmosClientBuilderFactory(CosmosProperties cosmosProperties) { this.cosmosProperties = cosmosProperties; } @Override protected CosmosClientBuilder createBuilderInstance() { return new CosmosClientBuilder(); } @Override protected AzureProperties getAzureProperties() { return this.cosmosProperties; } @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(CosmosClientBuilder builder) { return Arrays.asList( new KeyAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())), new TokenAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())) ); } @Override protected void configureProxy(CosmosClientBuilder builder) { ProxyAware.Proxy proxy = this.cosmosProperties.getProxy(); this.proxyOptions = HTTP_PROXY_CONVERTER.convert(proxy); if (this.proxyOptions == null) { LOGGER.debug("No proxy properties available."); } } @Override protected void configureRetry(CosmosClientBuilder builder) { RetryAware.Retry retry = this.cosmosProperties.getRetry(); if (isInvalidRetry(retry)) { return; } this.throttlingRetryOptions = new ThrottlingRetryOptions(); this.throttlingRetryOptions.setMaxRetryWaitTime(retry.getTimeout()); this.throttlingRetryOptions.setMaxRetryAttemptsOnThrottledRequests(retry.getMaxAttempts()); } @Override /** * Configure Cosmos connection. * If not configured the proxy of gateway connection, then will try to use the root proxy of Cosmos properties. * @param builder Cosmos client builder * @param map Property mapper */ private void configureConnection(CosmosClientBuilder builder, PropertyMapper map) { map.from(this.cosmosProperties.getResourceToken()).to(builder::resourceToken); map.from(this.cosmosProperties.getPermissions()).whenNot(List::isEmpty).to(builder::permissions); GatewayConnectionConfig gatewayConnection = this.cosmosProperties.getGatewayConnection(); if (proxyOptions != null && gatewayConnection.getProxy() == null) { gatewayConnection.setProxy(proxyOptions); LOGGER.debug("The proxy of the Gateway connection is not configured, " + "then the Azure Spring Proxy configuration will be applied to Cosmos gateway connection."); } if (ConnectionMode.DIRECT.equals(this.cosmosProperties.getConnectionMode())) { builder.directMode(this.cosmosProperties.getDirectConnection(), gatewayConnection); } else if (ConnectionMode.GATEWAY.equals(this.cosmosProperties.getConnectionMode())) { builder.gatewayMode(gatewayConnection); } } /** * Configure ThrottlingRetryOptions. * If not configured the retry options of ThrottlingRetryOptions, then will try to use the root retry options of Cosmos properties. 
* @param builder Cosmos client builder * @param map Property mapper */ private void configureThrottlingRetryOptions(CosmosClientBuilder builder, PropertyMapper map) { ThrottlingRetryOptions retryOptions = this.cosmosProperties.getThrottlingRetryOptions(); if (this.throttlingRetryOptions != null && isDefaultThrottlingRetryOptions(retryOptions)) { map.from(this.throttlingRetryOptions).to(builder::throttlingRetryOptions); LOGGER.debug("The throttling retry options is not configured, " + "then the Azure Spring Retry configuration will be applied to Cosmos service builder."); } else { map.from(retryOptions).to(builder::throttlingRetryOptions); } } /** * Check if the retry option is the default value, which is defined in azure-cosmos SDK. * @param retryOptions retry options to be checked * @return result */ private boolean isDefaultThrottlingRetryOptions(ThrottlingRetryOptions retryOptions) { ThrottlingRetryOptions defaultOptions = new ThrottlingRetryOptions(); return defaultOptions.getMaxRetryAttemptsOnThrottledRequests() == retryOptions.getMaxRetryAttemptsOnThrottledRequests() && defaultOptions.getMaxRetryWaitTime().equals(retryOptions.getMaxRetryWaitTime()); } /** * Check if the properties of the retry is invalid value. * @param retry retry options to be checked * @return result */ private boolean isInvalidRetry(RetryAware.Retry retry) { return retry.getMaxAttempts() == null || retry.getTimeout() == null; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeApplicationId() { return CosmosClientBuilder::userAgentSuffix; } @Override protected BiConsumer<CosmosClientBuilder, Configuration> consumeConfiguration() { LOGGER.warn("Configuration instance is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } @Override protected BiConsumer<CosmosClientBuilder, TokenCredential> consumeDefaultTokenCredential() { return CosmosClientBuilder::credential; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeConnectionString() { LOGGER.debug("Connection string is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } }
same here
protected void configureService(CosmosClientBuilder builder) { PropertyMapper map = new PropertyMapper(); map.from(this.cosmosProperties.getEndpoint()).to(builder::endpoint); map.from(this.cosmosProperties.getConsistencyLevel()).to(builder::consistencyLevel); map.from(this.cosmosProperties.getClientTelemetryEnabled()).to(builder::clientTelemetryEnabled); map.from(this.cosmosProperties.getConnectionSharingAcrossClientsEnabled()).to(builder::connectionSharingAcrossClientsEnabled); map.from(this.cosmosProperties.getContentResponseOnWriteEnabled()).to(builder::contentResponseOnWriteEnabled); map.from(this.cosmosProperties.getEndpointDiscoveryEnabled()).to(builder::endpointDiscoveryEnabled); map.from(this.cosmosProperties.getMultipleWriteRegionsEnabled()).to(builder::multipleWriteRegionsEnabled); map.from(this.cosmosProperties.getReadRequestsFallbackEnabled()).to(builder::readRequestsFallbackEnabled); map.from(this.cosmosProperties.getSessionCapturingOverrideEnabled()).to(builder::sessionCapturingOverrideEnabled); map.from(this.cosmosProperties.getPreferredRegions()).whenNot(List::isEmpty).to(builder::preferredRegions); ThrottlingRetryOptions retryOptions = this.cosmosProperties.getThrottlingRetryOptions(); if (this.throttlingRetryOptions != null && isDefaultThrottlingRetryOptions(retryOptions)) { map.from(this.throttlingRetryOptions).to(builder::throttlingRetryOptions); LOGGER.debug("The throttling retry options is not configured, " + "then the Azure Spring Retry configuration will be applied to Cosmos service builder."); } else { map.from(retryOptions).to(builder::throttlingRetryOptions); } map.from(this.cosmosProperties.getResourceToken()).to(builder::resourceToken); map.from(this.cosmosProperties.getPermissions()).whenNot(List::isEmpty).to(builder::permissions); GatewayConnectionConfig gatewayConnection = this.cosmosProperties.getGatewayConnection(); if (proxyOptions != null && gatewayConnection.getProxy() == null) { gatewayConnection.setProxy(proxyOptions); LOGGER.debug("The proxy of the Gateway connection is not configured, " + "then the Azure Spring Proxy configuration will be applied to Cosmos gateway connection."); } if (ConnectionMode.DIRECT.equals(this.cosmosProperties.getConnectionMode())) { builder.directMode(this.cosmosProperties.getDirectConnection(), gatewayConnection); } else if (ConnectionMode.GATEWAY.equals(this.cosmosProperties.getConnectionMode())) { builder.gatewayMode(gatewayConnection); } }
}
protected void configureService(CosmosClientBuilder builder) { PropertyMapper map = new PropertyMapper(); map.from(this.cosmosProperties.getEndpoint()).to(builder::endpoint); map.from(this.cosmosProperties.getConsistencyLevel()).to(builder::consistencyLevel); map.from(this.cosmosProperties.getClientTelemetryEnabled()).to(builder::clientTelemetryEnabled); map.from(this.cosmosProperties.getConnectionSharingAcrossClientsEnabled()).to(builder::connectionSharingAcrossClientsEnabled); map.from(this.cosmosProperties.getContentResponseOnWriteEnabled()).to(builder::contentResponseOnWriteEnabled); map.from(this.cosmosProperties.getEndpointDiscoveryEnabled()).to(builder::endpointDiscoveryEnabled); map.from(this.cosmosProperties.getMultipleWriteRegionsEnabled()).to(builder::multipleWriteRegionsEnabled); map.from(this.cosmosProperties.getReadRequestsFallbackEnabled()).to(builder::readRequestsFallbackEnabled); map.from(this.cosmosProperties.getSessionCapturingOverrideEnabled()).to(builder::sessionCapturingOverrideEnabled); map.from(this.cosmosProperties.getPreferredRegions()).whenNot(List::isEmpty).to(builder::preferredRegions); configureThrottlingRetryOptions(builder, map); configureConnection(builder, map); }
class CosmosClientBuilderFactory extends AbstractAzureServiceClientBuilderFactory<CosmosClientBuilder> { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosClientBuilderFactory.class); private final CosmosProperties cosmosProperties; private ProxyOptions proxyOptions; private ThrottlingRetryOptions throttlingRetryOptions; public CosmosClientBuilderFactory(CosmosProperties cosmosProperties) { this.cosmosProperties = cosmosProperties; } @Override protected CosmosClientBuilder createBuilderInstance() { return new CosmosClientBuilder(); } @Override protected AzureProperties getAzureProperties() { return this.cosmosProperties; } @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(CosmosClientBuilder builder) { return Arrays.asList( new KeyAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())), new TokenAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())) ); } @Override protected void configureProxy(CosmosClientBuilder builder) { ProxyAware.Proxy proxy = this.cosmosProperties.getProxy(); this.proxyOptions = HTTP_PROXY_CONVERTER.convert(proxy); if (this.proxyOptions == null) { LOGGER.debug("No proxy properties available."); } } @Override protected void configureRetry(CosmosClientBuilder builder) { RetryAware.Retry retry = this.cosmosProperties.getRetry(); if (isInvalidRetry(retry)) { return; } this.throttlingRetryOptions = new ThrottlingRetryOptions(); this.throttlingRetryOptions.setMaxRetryWaitTime(retry.getTimeout()); this.throttlingRetryOptions.setMaxRetryAttemptsOnThrottledRequests(retry.getMaxAttempts()); } @Override /** * Check if the retry option is the default value, which is defined in azure-cosmos SDK. * @param retryOptions retry options to be checked * @return result */ private boolean isDefaultThrottlingRetryOptions(ThrottlingRetryOptions retryOptions) { if (retryOptions.getMaxRetryWaitTime().equals(Duration.ofSeconds(30)) && retryOptions.getMaxRetryAttemptsOnThrottledRequests() == 9) { return true; } return false; } /** * Check if the properties of the retry is invalid value. * @param retry retry options to be checked * @return result */ private boolean isInvalidRetry(RetryAware.Retry retry) { if (retry.getMaxAttempts() == null || retry.getTimeout() == null) { return true; } return false; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeApplicationId() { return CosmosClientBuilder::userAgentSuffix; } @Override protected BiConsumer<CosmosClientBuilder, Configuration> consumeConfiguration() { LOGGER.warn("Configuration instance is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } @Override protected BiConsumer<CosmosClientBuilder, TokenCredential> consumeDefaultTokenCredential() { return CosmosClientBuilder::credential; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeConnectionString() { LOGGER.debug("Connection string is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } }
class CosmosClientBuilderFactory extends AbstractAzureServiceClientBuilderFactory<CosmosClientBuilder> { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosClientBuilderFactory.class); private final CosmosProperties cosmosProperties; private ProxyOptions proxyOptions; private ThrottlingRetryOptions throttlingRetryOptions; public CosmosClientBuilderFactory(CosmosProperties cosmosProperties) { this.cosmosProperties = cosmosProperties; } @Override protected CosmosClientBuilder createBuilderInstance() { return new CosmosClientBuilder(); } @Override protected AzureProperties getAzureProperties() { return this.cosmosProperties; } @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(CosmosClientBuilder builder) { return Arrays.asList( new KeyAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())), new TokenAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())) ); } @Override protected void configureProxy(CosmosClientBuilder builder) { ProxyAware.Proxy proxy = this.cosmosProperties.getProxy(); this.proxyOptions = HTTP_PROXY_CONVERTER.convert(proxy); if (this.proxyOptions == null) { LOGGER.debug("No proxy properties available."); } } @Override protected void configureRetry(CosmosClientBuilder builder) { RetryAware.Retry retry = this.cosmosProperties.getRetry(); if (isInvalidRetry(retry)) { return; } this.throttlingRetryOptions = new ThrottlingRetryOptions(); this.throttlingRetryOptions.setMaxRetryWaitTime(retry.getTimeout()); this.throttlingRetryOptions.setMaxRetryAttemptsOnThrottledRequests(retry.getMaxAttempts()); } @Override /** * Configure Cosmos connection. * If not configured the proxy of gateway connection, then will try to use the root proxy of Cosmos properties. * @param builder Cosmos client builder * @param map Property mapper */ private void configureConnection(CosmosClientBuilder builder, PropertyMapper map) { map.from(this.cosmosProperties.getResourceToken()).to(builder::resourceToken); map.from(this.cosmosProperties.getPermissions()).whenNot(List::isEmpty).to(builder::permissions); GatewayConnectionConfig gatewayConnection = this.cosmosProperties.getGatewayConnection(); if (proxyOptions != null && gatewayConnection.getProxy() == null) { gatewayConnection.setProxy(proxyOptions); LOGGER.debug("The proxy of the Gateway connection is not configured, " + "then the Azure Spring Proxy configuration will be applied to Cosmos gateway connection."); } if (ConnectionMode.DIRECT.equals(this.cosmosProperties.getConnectionMode())) { builder.directMode(this.cosmosProperties.getDirectConnection(), gatewayConnection); } else if (ConnectionMode.GATEWAY.equals(this.cosmosProperties.getConnectionMode())) { builder.gatewayMode(gatewayConnection); } } /** * Configure ThrottlingRetryOptions. * If not configured the retry options of ThrottlingRetryOptions, then will try to use the root retry options of Cosmos properties. 
* @param builder Cosmos client builder * @param map Property mapper */ private void configureThrottlingRetryOptions(CosmosClientBuilder builder, PropertyMapper map) { ThrottlingRetryOptions retryOptions = this.cosmosProperties.getThrottlingRetryOptions(); if (this.throttlingRetryOptions != null && isDefaultThrottlingRetryOptions(retryOptions)) { map.from(this.throttlingRetryOptions).to(builder::throttlingRetryOptions); LOGGER.debug("The throttling retry options is not configured, " + "then the Azure Spring Retry configuration will be applied to Cosmos service builder."); } else { map.from(retryOptions).to(builder::throttlingRetryOptions); } } /** * Check if the retry option is the default value, which is defined in azure-cosmos SDK. * @param retryOptions retry options to be checked * @return result */ private boolean isDefaultThrottlingRetryOptions(ThrottlingRetryOptions retryOptions) { ThrottlingRetryOptions defaultOptions = new ThrottlingRetryOptions(); return defaultOptions.getMaxRetryAttemptsOnThrottledRequests() == retryOptions.getMaxRetryAttemptsOnThrottledRequests() && defaultOptions.getMaxRetryWaitTime().equals(retryOptions.getMaxRetryWaitTime()); } /** * Check if the properties of the retry is invalid value. * @param retry retry options to be checked * @return result */ private boolean isInvalidRetry(RetryAware.Retry retry) { return retry.getMaxAttempts() == null || retry.getTimeout() == null; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeApplicationId() { return CosmosClientBuilder::userAgentSuffix; } @Override protected BiConsumer<CosmosClientBuilder, Configuration> consumeConfiguration() { LOGGER.warn("Configuration instance is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } @Override protected BiConsumer<CosmosClientBuilder, TokenCredential> consumeDefaultTokenCredential() { return CosmosClientBuilder::credential; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeConnectionString() { LOGGER.debug("Connection string is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } }
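The refactor this record and the previous one converge on keeps configureService as a flat list of PropertyMapper mappings and delegates the retry and connection wiring to configureThrottlingRetryOptions and configureConnection. For readers unfamiliar with the mapper chain used throughout that method, the following is a minimal, hypothetical stand-in for the project's PropertyMapper (not the real class), sketching what from(...).whenNot(...).to(...) does: apply the value to the builder only when it is non-null and the optional predicate does not reject it. The endpoint string is a placeholder.

import java.util.List;
import java.util.function.Consumer;
import java.util.function.Predicate;

final class MiniPropertyMapper {
    <T> Source<T> from(T value) {
        return new Source<>(value);
    }

    static final class Source<T> {
        private final T value;
        private boolean skip;

        private Source(T value) {
            this.value = value;
            this.skip = (value == null);           // null properties are never mapped
        }

        Source<T> whenNot(Predicate<T> predicate) {
            if (!skip && predicate.test(value)) {  // e.g. whenNot(List::isEmpty) skips empty lists
                skip = true;
            }
            return this;
        }

        void to(Consumer<T> setter) {
            if (!skip) {
                setter.accept(value);              // only configured values reach the builder
            }
        }
    }

    public static void main(String[] args) {
        MiniPropertyMapper map = new MiniPropertyMapper();
        map.from("https://example-account.documents.azure.com").to(System.out::println); // mapped
        map.from((String) null).to(System.out::println);                                 // skipped: null
        map.from(List.<String>of()).whenNot(List::isEmpty).to(System.out::println);      // skipped: empty
    }
}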
These values are hard-coded, which couples our logic to the SDK implementation. If the SDK ever changes its default configuration, this assertion will no longer be valid.
private boolean isDefaultThrottlingRetryOptions(ThrottlingRetryOptions retryOptions) { if (retryOptions.getMaxRetryWaitTime().equals(Duration.ofSeconds(30)) && retryOptions.getMaxRetryAttemptsOnThrottledRequests() == 9) { return true; } return false; }
}
private boolean isDefaultThrottlingRetryOptions(ThrottlingRetryOptions retryOptions) { ThrottlingRetryOptions defaultOptions = new ThrottlingRetryOptions(); return defaultOptions.getMaxRetryAttemptsOnThrottledRequests() == retryOptions.getMaxRetryAttemptsOnThrottledRequests() && defaultOptions.getMaxRetryWaitTime().equals(retryOptions.getMaxRetryWaitTime()); }
class CosmosClientBuilderFactory extends AbstractAzureServiceClientBuilderFactory<CosmosClientBuilder> { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosClientBuilderFactory.class); private final CosmosProperties cosmosProperties; private ProxyOptions proxyOptions; private ThrottlingRetryOptions throttlingRetryOptions; public CosmosClientBuilderFactory(CosmosProperties cosmosProperties) { this.cosmosProperties = cosmosProperties; } @Override protected CosmosClientBuilder createBuilderInstance() { return new CosmosClientBuilder(); } @Override protected AzureProperties getAzureProperties() { return this.cosmosProperties; } @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(CosmosClientBuilder builder) { return Arrays.asList( new KeyAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())), new TokenAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())) ); } @Override protected void configureProxy(CosmosClientBuilder builder) { ProxyAware.Proxy proxy = this.cosmosProperties.getProxy(); this.proxyOptions = HTTP_PROXY_CONVERTER.convert(proxy); if (this.proxyOptions == null) { LOGGER.debug("No proxy properties available."); } } @Override protected void configureRetry(CosmosClientBuilder builder) { RetryAware.Retry retry = this.cosmosProperties.getRetry(); if (isInvalidRetry(retry)) { return; } this.throttlingRetryOptions = new ThrottlingRetryOptions(); this.throttlingRetryOptions.setMaxRetryWaitTime(retry.getTimeout()); this.throttlingRetryOptions.setMaxRetryAttemptsOnThrottledRequests(retry.getMaxAttempts()); } @Override protected void configureService(CosmosClientBuilder builder) { PropertyMapper map = new PropertyMapper(); map.from(this.cosmosProperties.getEndpoint()).to(builder::endpoint); map.from(this.cosmosProperties.getConsistencyLevel()).to(builder::consistencyLevel); map.from(this.cosmosProperties.getClientTelemetryEnabled()).to(builder::clientTelemetryEnabled); map.from(this.cosmosProperties.getConnectionSharingAcrossClientsEnabled()).to(builder::connectionSharingAcrossClientsEnabled); map.from(this.cosmosProperties.getContentResponseOnWriteEnabled()).to(builder::contentResponseOnWriteEnabled); map.from(this.cosmosProperties.getEndpointDiscoveryEnabled()).to(builder::endpointDiscoveryEnabled); map.from(this.cosmosProperties.getMultipleWriteRegionsEnabled()).to(builder::multipleWriteRegionsEnabled); map.from(this.cosmosProperties.getReadRequestsFallbackEnabled()).to(builder::readRequestsFallbackEnabled); map.from(this.cosmosProperties.getSessionCapturingOverrideEnabled()).to(builder::sessionCapturingOverrideEnabled); map.from(this.cosmosProperties.getPreferredRegions()).whenNot(List::isEmpty).to(builder::preferredRegions); ThrottlingRetryOptions retryOptions = this.cosmosProperties.getThrottlingRetryOptions(); if (this.throttlingRetryOptions != null && isDefaultThrottlingRetryOptions(retryOptions)) { map.from(this.throttlingRetryOptions).to(builder::throttlingRetryOptions); LOGGER.debug("The throttling retry options is not configured, " + "then the Azure Spring Retry configuration will be applied to Cosmos service builder."); } else { map.from(retryOptions).to(builder::throttlingRetryOptions); } map.from(this.cosmosProperties.getResourceToken()).to(builder::resourceToken); map.from(this.cosmosProperties.getPermissions()).whenNot(List::isEmpty).to(builder::permissions); GatewayConnectionConfig gatewayConnection = this.cosmosProperties.getGatewayConnection(); if (proxyOptions != null 
&& gatewayConnection.getProxy() == null) { gatewayConnection.setProxy(proxyOptions); LOGGER.debug("The proxy of the Gateway connection is not configured, " + "then the Azure Spring Proxy configuration will be applied to Cosmos gateway connection."); } if (ConnectionMode.DIRECT.equals(this.cosmosProperties.getConnectionMode())) { builder.directMode(this.cosmosProperties.getDirectConnection(), gatewayConnection); } else if (ConnectionMode.GATEWAY.equals(this.cosmosProperties.getConnectionMode())) { builder.gatewayMode(gatewayConnection); } } /** * Check if the retry option is the default value, which is defined in azure-cosmos SDK. * @param retryOptions retry options to be checked * @return result */ /** * Check if the properties of the retry is invalid value. * @param retry retry options to be checked * @return result */ private boolean isInvalidRetry(RetryAware.Retry retry) { if (retry.getMaxAttempts() == null || retry.getTimeout() == null) { return true; } return false; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeApplicationId() { return CosmosClientBuilder::userAgentSuffix; } @Override protected BiConsumer<CosmosClientBuilder, Configuration> consumeConfiguration() { LOGGER.warn("Configuration instance is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } @Override protected BiConsumer<CosmosClientBuilder, TokenCredential> consumeDefaultTokenCredential() { return CosmosClientBuilder::credential; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeConnectionString() { LOGGER.debug("Connection string is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } }
class CosmosClientBuilderFactory extends AbstractAzureServiceClientBuilderFactory<CosmosClientBuilder> { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosClientBuilderFactory.class); private final CosmosProperties cosmosProperties; private ProxyOptions proxyOptions; private ThrottlingRetryOptions throttlingRetryOptions; public CosmosClientBuilderFactory(CosmosProperties cosmosProperties) { this.cosmosProperties = cosmosProperties; } @Override protected CosmosClientBuilder createBuilderInstance() { return new CosmosClientBuilder(); } @Override protected AzureProperties getAzureProperties() { return this.cosmosProperties; } @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(CosmosClientBuilder builder) { return Arrays.asList( new KeyAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())), new TokenAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())) ); } @Override protected void configureProxy(CosmosClientBuilder builder) { ProxyAware.Proxy proxy = this.cosmosProperties.getProxy(); this.proxyOptions = HTTP_PROXY_CONVERTER.convert(proxy); if (this.proxyOptions == null) { LOGGER.debug("No proxy properties available."); } } @Override protected void configureRetry(CosmosClientBuilder builder) { RetryAware.Retry retry = this.cosmosProperties.getRetry(); if (isInvalidRetry(retry)) { return; } this.throttlingRetryOptions = new ThrottlingRetryOptions(); this.throttlingRetryOptions.setMaxRetryWaitTime(retry.getTimeout()); this.throttlingRetryOptions.setMaxRetryAttemptsOnThrottledRequests(retry.getMaxAttempts()); } @Override protected void configureService(CosmosClientBuilder builder) { PropertyMapper map = new PropertyMapper(); map.from(this.cosmosProperties.getEndpoint()).to(builder::endpoint); map.from(this.cosmosProperties.getConsistencyLevel()).to(builder::consistencyLevel); map.from(this.cosmosProperties.getClientTelemetryEnabled()).to(builder::clientTelemetryEnabled); map.from(this.cosmosProperties.getConnectionSharingAcrossClientsEnabled()).to(builder::connectionSharingAcrossClientsEnabled); map.from(this.cosmosProperties.getContentResponseOnWriteEnabled()).to(builder::contentResponseOnWriteEnabled); map.from(this.cosmosProperties.getEndpointDiscoveryEnabled()).to(builder::endpointDiscoveryEnabled); map.from(this.cosmosProperties.getMultipleWriteRegionsEnabled()).to(builder::multipleWriteRegionsEnabled); map.from(this.cosmosProperties.getReadRequestsFallbackEnabled()).to(builder::readRequestsFallbackEnabled); map.from(this.cosmosProperties.getSessionCapturingOverrideEnabled()).to(builder::sessionCapturingOverrideEnabled); map.from(this.cosmosProperties.getPreferredRegions()).whenNot(List::isEmpty).to(builder::preferredRegions); configureThrottlingRetryOptions(builder, map); configureConnection(builder, map); } /** * Configure Cosmos connection. * If not configured the proxy of gateway connection, then will try to use the root proxy of Cosmos properties. 
* @param builder Cosmos client builder * @param map Property mapper */ private void configureConnection(CosmosClientBuilder builder, PropertyMapper map) { map.from(this.cosmosProperties.getResourceToken()).to(builder::resourceToken); map.from(this.cosmosProperties.getPermissions()).whenNot(List::isEmpty).to(builder::permissions); GatewayConnectionConfig gatewayConnection = this.cosmosProperties.getGatewayConnection(); if (proxyOptions != null && gatewayConnection.getProxy() == null) { gatewayConnection.setProxy(proxyOptions); LOGGER.debug("The proxy of the Gateway connection is not configured, " + "then the Azure Spring Proxy configuration will be applied to Cosmos gateway connection."); } if (ConnectionMode.DIRECT.equals(this.cosmosProperties.getConnectionMode())) { builder.directMode(this.cosmosProperties.getDirectConnection(), gatewayConnection); } else if (ConnectionMode.GATEWAY.equals(this.cosmosProperties.getConnectionMode())) { builder.gatewayMode(gatewayConnection); } } /** * Configure ThrottlingRetryOptions. * If not configured the retry options of ThrottlingRetryOptions, then will try to use the root retry options of Cosmos properties. * @param builder Cosmos client builder * @param map Property mapper */ private void configureThrottlingRetryOptions(CosmosClientBuilder builder, PropertyMapper map) { ThrottlingRetryOptions retryOptions = this.cosmosProperties.getThrottlingRetryOptions(); if (this.throttlingRetryOptions != null && isDefaultThrottlingRetryOptions(retryOptions)) { map.from(this.throttlingRetryOptions).to(builder::throttlingRetryOptions); LOGGER.debug("The throttling retry options is not configured, " + "then the Azure Spring Retry configuration will be applied to Cosmos service builder."); } else { map.from(retryOptions).to(builder::throttlingRetryOptions); } } /** * Check if the retry option is the default value, which is defined in azure-cosmos SDK. * @param retryOptions retry options to be checked * @return result */ /** * Check if the properties of the retry is invalid value. * @param retry retry options to be checked * @return result */ private boolean isInvalidRetry(RetryAware.Retry retry) { return retry.getMaxAttempts() == null || retry.getTimeout() == null; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeApplicationId() { return CosmosClientBuilder::userAgentSuffix; } @Override protected BiConsumer<CosmosClientBuilder, Configuration> consumeConfiguration() { LOGGER.warn("Configuration instance is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } @Override protected BiConsumer<CosmosClientBuilder, TokenCredential> consumeDefaultTokenCredential() { return CosmosClientBuilder::credential; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeConnectionString() { LOGGER.debug("Connection string is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } }
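The rewritten isDefaultThrottlingRetryOptions above already removes the hard-coded 30-second / 9-attempt constants by comparing against a freshly constructed ThrottlingRetryOptions. As a standalone illustration of that idea (a sketch, assuming only that azure-cosmos is on the classpath; the helper class name is made up), the same check can be packaged so it keeps tracking whatever defaults the SDK ships:

import com.azure.cosmos.ThrottlingRetryOptions;

final class RetryOptionDefaults {

    private RetryOptionDefaults() {
    }

    // true when the given options still carry the SDK's current defaults,
    // whatever those defaults happen to be in the azure-cosmos version in use
    static boolean matchesSdkDefaults(ThrottlingRetryOptions candidate) {
        ThrottlingRetryOptions defaults = new ThrottlingRetryOptions();
        return defaults.getMaxRetryAttemptsOnThrottledRequests() == candidate.getMaxRetryAttemptsOnThrottledRequests()
            && defaults.getMaxRetryWaitTime().equals(candidate.getMaxRetryWaitTime());
    }
}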
`return retry.getMaxAttempts() == null || retry.getTimeout() == null;`
private boolean isInvalidRetry(RetryAware.Retry retry) { if (retry.getMaxAttempts() == null || retry.getTimeout() == null) { return true; } return false; }
return false;
private boolean isInvalidRetry(RetryAware.Retry retry) { return retry.getMaxAttempts() == null || retry.getTimeout() == null; }
class CosmosClientBuilderFactory extends AbstractAzureServiceClientBuilderFactory<CosmosClientBuilder> { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosClientBuilderFactory.class); private final CosmosProperties cosmosProperties; private ProxyOptions proxyOptions; private ThrottlingRetryOptions throttlingRetryOptions; public CosmosClientBuilderFactory(CosmosProperties cosmosProperties) { this.cosmosProperties = cosmosProperties; } @Override protected CosmosClientBuilder createBuilderInstance() { return new CosmosClientBuilder(); } @Override protected AzureProperties getAzureProperties() { return this.cosmosProperties; } @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(CosmosClientBuilder builder) { return Arrays.asList( new KeyAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())), new TokenAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())) ); } @Override protected void configureProxy(CosmosClientBuilder builder) { ProxyAware.Proxy proxy = this.cosmosProperties.getProxy(); this.proxyOptions = HTTP_PROXY_CONVERTER.convert(proxy); if (this.proxyOptions == null) { LOGGER.debug("No proxy properties available."); } } @Override protected void configureRetry(CosmosClientBuilder builder) { RetryAware.Retry retry = this.cosmosProperties.getRetry(); if (isInvalidRetry(retry)) { return; } this.throttlingRetryOptions = new ThrottlingRetryOptions(); this.throttlingRetryOptions.setMaxRetryWaitTime(retry.getTimeout()); this.throttlingRetryOptions.setMaxRetryAttemptsOnThrottledRequests(retry.getMaxAttempts()); } @Override protected void configureService(CosmosClientBuilder builder) { PropertyMapper map = new PropertyMapper(); map.from(this.cosmosProperties.getEndpoint()).to(builder::endpoint); map.from(this.cosmosProperties.getConsistencyLevel()).to(builder::consistencyLevel); map.from(this.cosmosProperties.getClientTelemetryEnabled()).to(builder::clientTelemetryEnabled); map.from(this.cosmosProperties.getConnectionSharingAcrossClientsEnabled()).to(builder::connectionSharingAcrossClientsEnabled); map.from(this.cosmosProperties.getContentResponseOnWriteEnabled()).to(builder::contentResponseOnWriteEnabled); map.from(this.cosmosProperties.getEndpointDiscoveryEnabled()).to(builder::endpointDiscoveryEnabled); map.from(this.cosmosProperties.getMultipleWriteRegionsEnabled()).to(builder::multipleWriteRegionsEnabled); map.from(this.cosmosProperties.getReadRequestsFallbackEnabled()).to(builder::readRequestsFallbackEnabled); map.from(this.cosmosProperties.getSessionCapturingOverrideEnabled()).to(builder::sessionCapturingOverrideEnabled); map.from(this.cosmosProperties.getPreferredRegions()).whenNot(List::isEmpty).to(builder::preferredRegions); ThrottlingRetryOptions retryOptions = this.cosmosProperties.getThrottlingRetryOptions(); if (this.throttlingRetryOptions != null && isDefaultThrottlingRetryOptions(retryOptions)) { map.from(this.throttlingRetryOptions).to(builder::throttlingRetryOptions); LOGGER.debug("The throttling retry options is not configured, " + "then the Azure Spring Retry configuration will be applied to Cosmos service builder."); } else { map.from(retryOptions).to(builder::throttlingRetryOptions); } map.from(this.cosmosProperties.getResourceToken()).to(builder::resourceToken); map.from(this.cosmosProperties.getPermissions()).whenNot(List::isEmpty).to(builder::permissions); GatewayConnectionConfig gatewayConnection = this.cosmosProperties.getGatewayConnection(); if (proxyOptions != null 
&& gatewayConnection.getProxy() == null) { gatewayConnection.setProxy(proxyOptions); LOGGER.debug("The proxy of the Gateway connection is not configured, " + "then the Azure Spring Proxy configuration will be applied to Cosmos gateway connection."); } if (ConnectionMode.DIRECT.equals(this.cosmosProperties.getConnectionMode())) { builder.directMode(this.cosmosProperties.getDirectConnection(), gatewayConnection); } else if (ConnectionMode.GATEWAY.equals(this.cosmosProperties.getConnectionMode())) { builder.gatewayMode(gatewayConnection); } } /** * Check if the retry option is the default value, which is defined in azure-cosmos SDK. * @param retryOptions retry options to be checked * @return result */ private boolean isDefaultThrottlingRetryOptions(ThrottlingRetryOptions retryOptions) { if (retryOptions.getMaxRetryWaitTime().equals(Duration.ofSeconds(30)) && retryOptions.getMaxRetryAttemptsOnThrottledRequests() == 9) { return true; } return false; } /** * Check if the properties of the retry is invalid value. * @param retry retry options to be checked * @return result */ @Override protected BiConsumer<CosmosClientBuilder, String> consumeApplicationId() { return CosmosClientBuilder::userAgentSuffix; } @Override protected BiConsumer<CosmosClientBuilder, Configuration> consumeConfiguration() { LOGGER.warn("Configuration instance is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } @Override protected BiConsumer<CosmosClientBuilder, TokenCredential> consumeDefaultTokenCredential() { return CosmosClientBuilder::credential; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeConnectionString() { LOGGER.debug("Connection string is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } }
class CosmosClientBuilderFactory extends AbstractAzureServiceClientBuilderFactory<CosmosClientBuilder> { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosClientBuilderFactory.class); private final CosmosProperties cosmosProperties; private ProxyOptions proxyOptions; private ThrottlingRetryOptions throttlingRetryOptions; public CosmosClientBuilderFactory(CosmosProperties cosmosProperties) { this.cosmosProperties = cosmosProperties; } @Override protected CosmosClientBuilder createBuilderInstance() { return new CosmosClientBuilder(); } @Override protected AzureProperties getAzureProperties() { return this.cosmosProperties; } @Override protected List<AuthenticationDescriptor<?>> getAuthenticationDescriptors(CosmosClientBuilder builder) { return Arrays.asList( new KeyAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())), new TokenAuthenticationDescriptor(provider -> builder.credential(provider.getCredential())) ); } @Override protected void configureProxy(CosmosClientBuilder builder) { ProxyAware.Proxy proxy = this.cosmosProperties.getProxy(); this.proxyOptions = HTTP_PROXY_CONVERTER.convert(proxy); if (this.proxyOptions == null) { LOGGER.debug("No proxy properties available."); } } @Override protected void configureRetry(CosmosClientBuilder builder) { RetryAware.Retry retry = this.cosmosProperties.getRetry(); if (isInvalidRetry(retry)) { return; } this.throttlingRetryOptions = new ThrottlingRetryOptions(); this.throttlingRetryOptions.setMaxRetryWaitTime(retry.getTimeout()); this.throttlingRetryOptions.setMaxRetryAttemptsOnThrottledRequests(retry.getMaxAttempts()); } @Override protected void configureService(CosmosClientBuilder builder) { PropertyMapper map = new PropertyMapper(); map.from(this.cosmosProperties.getEndpoint()).to(builder::endpoint); map.from(this.cosmosProperties.getConsistencyLevel()).to(builder::consistencyLevel); map.from(this.cosmosProperties.getClientTelemetryEnabled()).to(builder::clientTelemetryEnabled); map.from(this.cosmosProperties.getConnectionSharingAcrossClientsEnabled()).to(builder::connectionSharingAcrossClientsEnabled); map.from(this.cosmosProperties.getContentResponseOnWriteEnabled()).to(builder::contentResponseOnWriteEnabled); map.from(this.cosmosProperties.getEndpointDiscoveryEnabled()).to(builder::endpointDiscoveryEnabled); map.from(this.cosmosProperties.getMultipleWriteRegionsEnabled()).to(builder::multipleWriteRegionsEnabled); map.from(this.cosmosProperties.getReadRequestsFallbackEnabled()).to(builder::readRequestsFallbackEnabled); map.from(this.cosmosProperties.getSessionCapturingOverrideEnabled()).to(builder::sessionCapturingOverrideEnabled); map.from(this.cosmosProperties.getPreferredRegions()).whenNot(List::isEmpty).to(builder::preferredRegions); configureThrottlingRetryOptions(builder, map); configureConnection(builder, map); } /** * Configure Cosmos connection. * If not configured the proxy of gateway connection, then will try to use the root proxy of Cosmos properties. 
* @param builder Cosmos client builder * @param map Property mapper */ private void configureConnection(CosmosClientBuilder builder, PropertyMapper map) { map.from(this.cosmosProperties.getResourceToken()).to(builder::resourceToken); map.from(this.cosmosProperties.getPermissions()).whenNot(List::isEmpty).to(builder::permissions); GatewayConnectionConfig gatewayConnection = this.cosmosProperties.getGatewayConnection(); if (proxyOptions != null && gatewayConnection.getProxy() == null) { gatewayConnection.setProxy(proxyOptions); LOGGER.debug("The proxy of the Gateway connection is not configured, " + "then the Azure Spring Proxy configuration will be applied to Cosmos gateway connection."); } if (ConnectionMode.DIRECT.equals(this.cosmosProperties.getConnectionMode())) { builder.directMode(this.cosmosProperties.getDirectConnection(), gatewayConnection); } else if (ConnectionMode.GATEWAY.equals(this.cosmosProperties.getConnectionMode())) { builder.gatewayMode(gatewayConnection); } } /** * Configure ThrottlingRetryOptions. * If not configured the retry options of ThrottlingRetryOptions, then will try to use the root retry options of Cosmos properties. * @param builder Cosmos client builder * @param map Property mapper */ private void configureThrottlingRetryOptions(CosmosClientBuilder builder, PropertyMapper map) { ThrottlingRetryOptions retryOptions = this.cosmosProperties.getThrottlingRetryOptions(); if (this.throttlingRetryOptions != null && isDefaultThrottlingRetryOptions(retryOptions)) { map.from(this.throttlingRetryOptions).to(builder::throttlingRetryOptions); LOGGER.debug("The throttling retry options is not configured, " + "then the Azure Spring Retry configuration will be applied to Cosmos service builder."); } else { map.from(retryOptions).to(builder::throttlingRetryOptions); } } /** * Check if the retry option is the default value, which is defined in azure-cosmos SDK. * @param retryOptions retry options to be checked * @return result */ private boolean isDefaultThrottlingRetryOptions(ThrottlingRetryOptions retryOptions) { ThrottlingRetryOptions defaultOptions = new ThrottlingRetryOptions(); return defaultOptions.getMaxRetryAttemptsOnThrottledRequests() == retryOptions.getMaxRetryAttemptsOnThrottledRequests() && defaultOptions.getMaxRetryWaitTime().equals(retryOptions.getMaxRetryWaitTime()); } /** * Check if the properties of the retry is invalid value. * @param retry retry options to be checked * @return result */ @Override protected BiConsumer<CosmosClientBuilder, String> consumeApplicationId() { return CosmosClientBuilder::userAgentSuffix; } @Override protected BiConsumer<CosmosClientBuilder, Configuration> consumeConfiguration() { LOGGER.warn("Configuration instance is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } @Override protected BiConsumer<CosmosClientBuilder, TokenCredential> consumeDefaultTokenCredential() { return CosmosClientBuilder::credential; } @Override protected BiConsumer<CosmosClientBuilder, String> consumeConnectionString() { LOGGER.debug("Connection string is not supported to configure in CosmosClientBuilder"); return (a, b) -> { }; } }
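The fix for this record simply returns the boolean expression instead of an if/else that returns true/false. A minimal self-contained sketch of that idiom follows; the Retry interface is a hypothetical stand-in for the project's RetryAware.Retry (reduced to the two getters used here), and treating a null Retry object as invalid is an extra assumption, not part of the original code.

import java.time.Duration;

final class RetryValidation {

    // hypothetical stand-in for RetryAware.Retry
    interface Retry {
        Integer getMaxAttempts();
        Duration getTimeout();
    }

    private RetryValidation() {
    }

    static boolean isInvalidRetry(Retry retry) {
        // return the expression directly instead of if (...) { return true; } return false;
        return retry == null || retry.getMaxAttempts() == null || retry.getTimeout() == null;
    }
}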
If we pass `rxCollectionCache` in the constructor, then you won't need the null check here.
private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); }
if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) {
private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); }
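The comment's point is that rxCollectionCache is already a constructor parameter, so it can be required up front (for example with Objects.requireNonNull on a final field), making the null check inside shouldRetryOnStaleContainer unnecessary. The following is a self-contained sketch of that pattern only; CollectionCache is a hypothetical stand-in for RxCollectionCache, not the SDK type, and the retry logic is reduced to the part relevant to the null check.

import java.util.Objects;

final class StaleContainerRetryHelper {

    // hypothetical stand-in for RxCollectionCache
    interface CollectionCache {
        void refresh(String collectionLink);
    }

    private final CollectionCache collectionCache;  // final and validated: never null after construction
    private int staleContainerRetryCount;

    StaleContainerRetryHelper(CollectionCache collectionCache) {
        this.collectionCache = Objects.requireNonNull(collectionCache, "collectionCache must not be null");
    }

    boolean shouldRetryOnStaleContainer() {
        this.staleContainerRetryCount++;
        if (this.staleContainerRetryCount > 1) {    // only the retry-count guard remains
            return false;
        }
        collectionCache.refresh(null);              // refresh the cached collection info, then retry once
        return true;
    }
}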
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(true, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, false); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. 
AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(true, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, false); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. 
AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
what does " if intendedCollectionRid was passed by outside sdk" mean? what's the scenario?
private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); }
private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); }
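Regarding the question above, and judging only from the before/after bodies shown here: `intendedCollectionRidPassedIntoSDK` appears to distinguish the case where the caller supplied the intended collection RID explicitly (refreshing the SDK's name-to-RID cache cannot change the value the caller asked for, so the cache is refreshed but the request is not retried) from the case where the SDK populated `INTENDED_COLLECTION_RID_HEADER` from its own, now stale, cache (drop the stale header, refresh, and retry once). A minimal, hypothetical sketch of that decision, using placeholder names and values rather than the SDK's `RxDocumentServiceRequest`/`ShouldRetryResult` types:

import java.util.Map;

final class StaleContainerRetryDecision {

    // Placeholder value for illustration only; the real INTENDED_COLLECTION_RID_HEADER
    // constant and its value live in the SDK and are not shown in this record.
    static final String INTENDED_COLLECTION_RID_HEADER = "intended-collection-rid";

    enum Decision { NO_RETRY, REFRESH_AND_NO_RETRY, REFRESH_AND_RETRY_ONCE }

    static Decision decide(boolean intendedCollectionRidPassedIntoSdk,
                           Map<String, String> headers,
                           int staleContainerRetryCount) {
        if (staleContainerRetryCount > 1) {
            // The policy above allows at most one retry for the stale-container case.
            return Decision.NO_RETRY;
        }
        if (intendedCollectionRidPassedIntoSdk) {
            // The caller (outside the SDK) pinned the request to a specific collection RID.
            // Refreshing the name-to-RID cache cannot produce a different RID for that
            // request, so the cache is refreshed but the request is not retried.
            return Decision.REFRESH_AND_NO_RETRY;
        }
        // The SDK set the header from its own stale cache: drop the stale header,
        // refresh the cache, and retry the request once.
        headers.remove(INTENDED_COLLECTION_RID_HEADER);
        return Decision.REFRESH_AND_RETRY_ONCE;
    }
}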
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(true, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, false); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. 
AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(true, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, false); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. 
AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
Use `Collections.emptyList()`. The same applies below.
public List<String> applicationGatewayBackendAddressPoolsIds() { VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); if (nicIpConfig == null) { return new ArrayList<>(); } List<SubResource> backendPools = nicIpConfig.applicationGatewayBackendAddressPools(); List<String> result = new ArrayList<>(); if (backendPools != null) { for (SubResource backendPool : backendPools) { result.add(backendPool.id()); } } return result; }
return new ArrayList<>();
public List<String> applicationGatewayBackendAddressPoolsIds() { VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); if (nicIpConfig == null) { return Collections.emptyList(); } List<SubResource> backendPools = nicIpConfig.applicationGatewayBackendAddressPools(); List<String> result = new ArrayList<>(); if (backendPools != null) { for (SubResource backendPool : backendPools) { result.add(backendPool.id()); } } return result; }
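On the suggestion above: `Collections.emptyList()` returns a shared, immutable singleton, so the null-guard branch avoids allocating a fresh `ArrayList` on every call; the trade-off is that the returned list cannot be mutated by the caller, which is usually acceptable for a read-only `...Ids()` accessor. A minimal sketch of the pattern with a hypothetical helper (not the SDK's API):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class EmptyListReturnExample {

    // Hypothetical helper mirroring the null-guard pattern in the method above.
    static List<String> copyOrEmpty(List<String> source) {
        if (source == null) {
            // Shared immutable instance: no per-call allocation, and it signals that
            // callers should not mutate the returned list.
            return Collections.emptyList();
        }
        return new ArrayList<>(source);
    }
}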
class VirtualMachineScaleSetImpl extends GroupableParentResourceImpl< VirtualMachineScaleSet, VirtualMachineScaleSetInner, VirtualMachineScaleSetImpl, ComputeManager> implements VirtualMachineScaleSet, VirtualMachineScaleSet.DefinitionManagedOrUnmanaged, VirtualMachineScaleSet.DefinitionManaged, VirtualMachineScaleSet.DefinitionUnmanaged, VirtualMachineScaleSet.Update, VirtualMachineScaleSet.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate, VirtualMachineScaleSet.DefinitionStages.WithUserAssignedManagedServiceIdentity, VirtualMachineScaleSet.UpdateStages.WithSystemAssignedIdentityBasedAccessOrApply, VirtualMachineScaleSet.UpdateStages.WithUserAssignedManagedServiceIdentity { private final StorageManager storageManager; private final NetworkManager networkManager; private final IdentifierProvider namer; private boolean isMarketplaceLinuxImage = false; private String existingPrimaryNetworkSubnetNameToAssociate; private List<String> creatableStorageAccountKeys = new ArrayList<>(); private List<StorageAccount> existingStorageAccountsToAssociate = new ArrayList<>(); private Map<String, VirtualMachineScaleSetExtension> extensions; private LoadBalancer primaryInternetFacingLoadBalancer; private LoadBalancer primaryInternalLoadBalancer; private boolean removePrimaryInternetFacingLoadBalancerOnUpdate; private boolean removePrimaryInternalLoadBalancerOnUpdate; private LoadBalancer primaryInternetFacingLoadBalancerToAttachOnUpdate; private LoadBalancer primaryInternalLoadBalancerToAttachOnUpdate; private List<String> primaryInternetFacingLBBackendsToRemoveOnUpdate = new ArrayList<>(); private List<String> primaryInternetFacingLBInboundNatPoolsToRemoveOnUpdate = new ArrayList<>(); private List<String> primaryInternalLBBackendsToRemoveOnUpdate = new ArrayList<>(); private List<String> primaryInternalLBInboundNatPoolsToRemoveOnUpdate = new ArrayList<>(); private List<String> primaryInternetFacingLBBackendsToAddOnUpdate = new ArrayList<>(); private List<String> primaryInternetFacingLBInboundNatPoolsToAddOnUpdate = new ArrayList<>(); private List<String> primaryInternalLBBackendsToAddOnUpdate = new ArrayList<>(); private List<String> primaryInternalLBInboundNatPoolsToAddOnUpdate = new ArrayList<>(); private boolean isUnmanagedDiskSelected; private final ManagedDataDiskCollection managedDataDisks; VirtualMachineScaleSetMsiHandler virtualMachineScaleSetMsiHandler; private final BootDiagnosticsHandler bootDiagnosticsHandler; private String newProximityPlacementGroupName; private ProximityPlacementGroupType newProximityPlacementGroupType; private boolean removeOsProfile; private final ClientLogger logger = new ClientLogger(VirtualMachineScaleSetImpl.class); private boolean profileAttached = false; VirtualMachineScaleSetImpl( String name, VirtualMachineScaleSetInner innerModel, final ComputeManager computeManager, final StorageManager storageManager, final NetworkManager networkManager, final AuthorizationManager authorizationManager) { super(name, innerModel, computeManager); this.storageManager = storageManager; this.networkManager = networkManager; this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.name()); this.managedDataDisks = new ManagedDataDiskCollection(this); this.virtualMachineScaleSetMsiHandler = new VirtualMachineScaleSetMsiHandler(authorizationManager, this); this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this); this.newProximityPlacementGroupName = null; this.newProximityPlacementGroupType = null; } @Override protected void 
initializeChildrenFromInner() { this.extensions = new HashMap<>(); if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null && this.innerModel().virtualMachineProfile().extensionProfile() != null) { if (this.innerModel().virtualMachineProfile().extensionProfile().extensions() != null) { for (VirtualMachineScaleSetExtensionInner inner : this.innerModel().virtualMachineProfile().extensionProfile().extensions()) { this.extensions.put(inner.name(), new VirtualMachineScaleSetExtensionImpl(inner, this)); } } } } @Override public VirtualMachineScaleSetVMs virtualMachines() { return new VirtualMachineScaleSetVMsImpl( this, this.manager().serviceClient().getVirtualMachineScaleSetVMs(), this.myManager); } @Override public PagedIterable<VirtualMachineScaleSetSku> listAvailableSkus() { return PagedConverter.mapPage(this .manager() .serviceClient() .getVirtualMachineScaleSets() .listSkus(this.resourceGroupName(), this.name()), VirtualMachineScaleSetSkuImpl::new); } @Override public void deallocate() { this.deallocateAsync().block(); } @Override public Mono<Void> deallocateAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .deallocateAsync(this.resourceGroupName(), this.name(), null) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void powerOff() { this.powerOffAsync().block(); } @Override public Mono<Void> powerOffAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .powerOffAsync(this.resourceGroupName(), this.name(), null, null); } @Override public void restart() { this.restartAsync().block(); } @Override public Mono<Void> restartAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .restartAsync(this.resourceGroupName(), this.name(), null); } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .startAsync(this.resourceGroupName(), this.name(), null); } @Override public void reimage() { this.reimageAsync().block(); } @Override public Mono<Void> reimageAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .reimageAsync(this.resourceGroupName(), this.name(), null); } @Override public RunCommandResult runPowerShellScriptInVMInstance( String vmId, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachineScaleSets() .runPowerShellScriptInVMInstance( this.resourceGroupName(), this.name(), vmId, scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runPowerShellScriptInVMInstanceAsync( String vmId, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachineScaleSets() .runPowerShellScriptInVMInstanceAsync( this.resourceGroupName(), this.name(), vmId, scriptLines, scriptParameters); } @Override public RunCommandResult runShellScriptInVMInstance( String vmId, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachineScaleSets() .runShellScriptInVMInstance(this.resourceGroupName(), this.name(), vmId, scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runShellScriptInVMInstanceAsync( String vmId, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachineScaleSets() .runShellScriptInVMInstanceAsync( this.resourceGroupName(), this.name(), vmId, scriptLines, scriptParameters); } 
@Override public RunCommandResult runCommandInVMInstance(String vmId, RunCommandInput inputCommand) { return this .manager() .virtualMachineScaleSets() .runCommandInVMInstance(this.resourceGroupName(), this.name(), vmId, inputCommand); } @Override public Mono<RunCommandResult> runCommandVMInstanceAsync(String vmId, RunCommandInput inputCommand) { return this .manager() .virtualMachineScaleSets() .runCommandVMInstanceAsync(this.resourceGroupName(), this.name(), vmId, inputCommand); } @Override public String computerNamePrefix() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().osProfile().computerNamePrefix(); } else { return null; } } @Override public OperatingSystemTypes osType() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().storageProfile().osDisk().osType(); } else { return null; } } @Override public CachingTypes osDiskCachingType() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().storageProfile().osDisk().caching(); } else { return null; } } @Override public String osDiskName() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().storageProfile().osDisk().name(); } else { return null; } } @Override public UpgradeMode upgradeModel() { return this.innerModel().upgradePolicy().mode(); } @Override public boolean overProvisionEnabled() { return this.innerModel().overprovision(); } @Override public VirtualMachineScaleSetSkuTypes sku() { return VirtualMachineScaleSetSkuTypes.fromSku(this.innerModel().sku()); } @Override public int capacity() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().sku().capacity()); } @Override public Network getPrimaryNetwork() throws IOException { VirtualMachineScaleSetIpConfiguration ipConfiguration = primaryNicDefaultIpConfiguration(); if (ipConfiguration == null) { return null; } String subnetId = ipConfiguration.subnet().id(); String virtualNetworkId = ResourceUtils.parentResourceIdFromResourceId(subnetId); return this.networkManager.networks().getById(virtualNetworkId); } @Override public LoadBalancer getPrimaryInternetFacingLoadBalancer() throws IOException { if (this.primaryInternetFacingLoadBalancer == null) { loadCurrentPrimaryLoadBalancersIfAvailableAsync().block(); } return this.primaryInternetFacingLoadBalancer; } @Override public Map<String, LoadBalancerBackend> listPrimaryInternetFacingLoadBalancerBackends() throws IOException { if (this.getPrimaryInternetFacingLoadBalancer() != null) { return getBackendsAssociatedWithIpConfiguration( this.primaryInternetFacingLoadBalancer, primaryNicDefaultIpConfiguration()); } return new HashMap<>(); } @Override public Map<String, LoadBalancerInboundNatPool> listPrimaryInternetFacingLoadBalancerInboundNatPools() throws IOException { if (this.getPrimaryInternetFacingLoadBalancer() != null) { return getInboundNatPoolsAssociatedWithIpConfiguration( this.primaryInternetFacingLoadBalancer, primaryNicDefaultIpConfiguration()); } return new HashMap<>(); } @Override public LoadBalancer getPrimaryInternalLoadBalancer() throws IOException { if (this.primaryInternalLoadBalancer == null) { loadCurrentPrimaryLoadBalancersIfAvailableAsync().block(); } return this.primaryInternalLoadBalancer; } @Override public Map<String, LoadBalancerBackend> 
listPrimaryInternalLoadBalancerBackends() throws IOException { if (this.getPrimaryInternalLoadBalancer() != null) { return getBackendsAssociatedWithIpConfiguration( this.primaryInternalLoadBalancer, primaryNicDefaultIpConfiguration()); } return new HashMap<>(); } @Override public Map<String, LoadBalancerInboundNatPool> listPrimaryInternalLoadBalancerInboundNatPools() throws IOException { if (this.getPrimaryInternalLoadBalancer() != null) { return getInboundNatPoolsAssociatedWithIpConfiguration( this.primaryInternalLoadBalancer, primaryNicDefaultIpConfiguration()); } return new HashMap<>(); } @Override public List<String> primaryPublicIpAddressIds() throws IOException { LoadBalancer loadBalancer = this.getPrimaryInternetFacingLoadBalancer(); if (loadBalancer != null) { return loadBalancer.publicIpAddressIds(); } return new ArrayList<>(); } @Override public List<String> vhdContainers() { if (this.storageProfile() != null && this.storageProfile().osDisk() != null && this.storageProfile().osDisk().vhdContainers() != null) { return this.storageProfile().osDisk().vhdContainers(); } return new ArrayList<>(); } @Override public VirtualMachineScaleSetStorageProfile storageProfile() { if (this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().storageProfile(); } else { return null; } } @Override public VirtualMachineScaleSetNetworkProfile networkProfile() { if (this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().networkProfile(); } else { return null; } } @Override public Map<String, VirtualMachineScaleSetExtension> extensions() { return Collections.unmodifiableMap(this.extensions); } @Override public VirtualMachinePriorityTypes virtualMachinePriority() { if (this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().priority(); } else { return null; } } @Override public BillingProfile billingProfile() { if (this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().billingProfile(); } else { return null; } } @Override public VirtualMachineScaleSetPublicIpAddressConfiguration virtualMachinePublicIpConfig() { VirtualMachineScaleSetIpConfiguration nicConfig = this.primaryNicDefaultIpConfiguration(); if (nicConfig == null) { return null; } return nicConfig.publicIpAddressConfiguration(); } @Override public VirtualMachineEvictionPolicyTypes virtualMachineEvictionPolicy() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().evictionPolicy(); } else { return null; } } @Override public boolean isIpForwardingEnabled() { VirtualMachineScaleSetNetworkConfiguration nicConfig = primaryNicConfiguration(); if (nicConfig == null || nicConfig.enableIpForwarding() == null) { return false; } return nicConfig.enableIpForwarding(); } @Override public boolean isAcceleratedNetworkingEnabled() { VirtualMachineScaleSetNetworkConfiguration nicConfig = primaryNicConfiguration(); if (nicConfig == null || nicConfig.enableAcceleratedNetworking() == null) { return false; } return nicConfig.enableAcceleratedNetworking(); } @Override public String networkSecurityGroupId() { VirtualMachineScaleSetNetworkConfiguration nicConfig = primaryNicConfiguration(); if (nicConfig != null && nicConfig.networkSecurityGroup() != null) { return nicConfig.networkSecurityGroup().id(); } else { return null; } } @Override public boolean isSinglePlacementGroupEnabled() { if 
(this.innerModel().singlePlacementGroup() != null) { return this.innerModel().singlePlacementGroup(); } else { return false; } } @Override @Override public List<String> applicationSecurityGroupIds() { VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); if (nicIpConfig == null) { return new ArrayList<>(); } List<String> asgIds = new ArrayList<>(); if (nicIpConfig.applicationSecurityGroups() != null) { for (SubResource asg : nicIpConfig.applicationSecurityGroups()) { asgIds.add(asg.id()); } } return asgIds; } @Override public Boolean doNotRunExtensionsOnOverprovisionedVMs() { return this.innerModel().doNotRunExtensionsOnOverprovisionedVMs(); } @Override public ProximityPlacementGroup proximityPlacementGroup() { if (innerModel().proximityPlacementGroup() == null) { return null; } else { ResourceId id = ResourceId.fromString(innerModel().proximityPlacementGroup().id()); ProximityPlacementGroupInner plgInner = manager() .serviceClient() .getProximityPlacementGroups() .getByResourceGroup(id.resourceGroupName(), id.name()); if (plgInner == null) { return null; } else { return new ProximityPlacementGroupImpl(plgInner); } } } @Override public AdditionalCapabilities additionalCapabilities() { return this.innerModel().additionalCapabilities(); } @Override public Plan plan() { return this.innerModel().plan(); } @Override public OrchestrationMode orchestrationMode() { return this.innerModel().orchestrationMode() == null ? OrchestrationMode.UNIFORM : this.innerModel().orchestrationMode(); } @Override public VirtualMachineScaleSetNetworkInterface getNetworkInterfaceByInstanceId(String instanceId, String name) { return this .networkManager .networkInterfaces() .getByVirtualMachineScaleSetInstanceId(this.resourceGroupName(), this.name(), instanceId, name); } @Override public Mono<VirtualMachineScaleSetNetworkInterface> getNetworkInterfaceByInstanceIdAsync(String instanceId, String name) { return this .networkManager .networkInterfaces() .getByVirtualMachineScaleSetInstanceIdAsync(this.resourceGroupName(), this.name(), instanceId, name); } @Override public PagedIterable<VirtualMachineScaleSetNetworkInterface> listNetworkInterfaces() { return this .networkManager .networkInterfaces() .listByVirtualMachineScaleSet(this.resourceGroupName(), this.name()); } @Override public PagedIterable<VirtualMachineScaleSetNetworkInterface> listNetworkInterfacesByInstanceId( String virtualMachineInstanceId) { return this .networkManager .networkInterfaces() .listByVirtualMachineScaleSetInstanceId(this.resourceGroupName(), this.name(), virtualMachineInstanceId); } @Override public PagedFlux<VirtualMachineScaleSetNetworkInterface> listNetworkInterfacesByInstanceIdAsync( String virtualMachineInstanceId) { return this .networkManager .networkInterfaces() .listByVirtualMachineScaleSetInstanceIdAsync( this.resourceGroupName(), this.name(), virtualMachineInstanceId); } @Override public VirtualMachineScaleSetImpl withSku(VirtualMachineScaleSetSkuTypes skuType) { this.innerModel().withSku(skuType.sku()); initVMProfileIfNecessary(); return this; } @Override public VirtualMachineScaleSetImpl withFlexibleOrchestrationMode() { return withFlexibleOrchestrationMode(1); } @Override public VirtualMachineScaleSetImpl withFlexibleOrchestrationMode(int faultDomainCount) { this.innerModel().withOrchestrationMode(OrchestrationMode.FLEXIBLE); this.innerModel().withPlatformFaultDomainCount(faultDomainCount); return this; } @Override public VirtualMachineScaleSetImpl withSku(VirtualMachineScaleSetSku sku) { 
return this.withSku(sku.skuType()); } @Override public VirtualMachineScaleSetImpl withExistingPrimaryNetworkSubnet(Network network, String subnetName) { initVMProfileIfNecessary(); this.existingPrimaryNetworkSubnetNameToAssociate = mergePath(network.id(), "subnets", subnetName); return this; } @Override public VirtualMachineScaleSetImpl withExistingPrimaryInternetFacingLoadBalancer(LoadBalancer loadBalancer) { if (loadBalancer.publicIpAddressIds().isEmpty()) { throw logger .logExceptionAsError( new IllegalArgumentException("Parameter loadBalancer must be an Internet facing load balancer")); } initVMProfileIfNecessary(); if (isInCreateMode()) { this.primaryInternetFacingLoadBalancer = loadBalancer; associateLoadBalancerToIpConfiguration( this.primaryInternetFacingLoadBalancer, this.primaryNicDefaultIpConfiguration()); } else { this.primaryInternetFacingLoadBalancerToAttachOnUpdate = loadBalancer; } return this; } @Override public VirtualMachineScaleSetImpl withPrimaryInternetFacingLoadBalancerBackends(String... backendNames) { initVMProfileIfNecessary(); if (this.isInCreateMode()) { VirtualMachineScaleSetIpConfiguration defaultPrimaryIpConfig = this.primaryNicDefaultIpConfiguration(); removeAllBackendAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancer, defaultPrimaryIpConfig); associateBackEndsToIpConfiguration( this.primaryInternetFacingLoadBalancer.id(), defaultPrimaryIpConfig, backendNames); } else { addToList(this.primaryInternetFacingLBBackendsToAddOnUpdate, backendNames); } return this; } @Override public VirtualMachineScaleSetImpl withPrimaryInternetFacingLoadBalancerInboundNatPools(String... natPoolNames) { initVMProfileIfNecessary(); if (this.isInCreateMode()) { VirtualMachineScaleSetIpConfiguration defaultPrimaryIpConfig = this.primaryNicDefaultIpConfiguration(); removeAllInboundNatPoolAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancer, defaultPrimaryIpConfig); associateInboundNATPoolsToIpConfiguration( this.primaryInternetFacingLoadBalancer.id(), defaultPrimaryIpConfig, natPoolNames); } else { addToList(this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate, natPoolNames); } return this; } @Override public VirtualMachineScaleSetImpl withExistingPrimaryInternalLoadBalancer(LoadBalancer loadBalancer) { if (!loadBalancer.publicIpAddressIds().isEmpty()) { throw logger .logExceptionAsError( new IllegalArgumentException("Parameter loadBalancer must be an internal load balancer")); } String lbNetworkId = null; for (LoadBalancerPrivateFrontend frontEnd : loadBalancer.privateFrontends().values()) { if (frontEnd.networkId() != null) { lbNetworkId = frontEnd.networkId(); } } initVMProfileIfNecessary(); if (isInCreateMode()) { String vmNICNetworkId = ResourceUtils.parentResourceIdFromResourceId(this.existingPrimaryNetworkSubnetNameToAssociate); if (!vmNICNetworkId.equalsIgnoreCase(lbNetworkId)) { throw logger .logExceptionAsError( new IllegalArgumentException( "Virtual network associated with scale set virtual machines" + " and internal load balancer must be same. 
" + "'" + vmNICNetworkId + "'" + "'" + lbNetworkId)); } this.primaryInternalLoadBalancer = loadBalancer; associateLoadBalancerToIpConfiguration( this.primaryInternalLoadBalancer, this.primaryNicDefaultIpConfiguration()); } else { String vmNicVnetId = ResourceUtils.parentResourceIdFromResourceId(primaryNicDefaultIpConfiguration().subnet().id()); if (!vmNicVnetId.equalsIgnoreCase(lbNetworkId)) { throw logger .logExceptionAsError( new IllegalArgumentException( "Virtual network associated with scale set virtual machines" + " and internal load balancer must be same. " + "'" + vmNicVnetId + "'" + "'" + lbNetworkId)); } this.primaryInternalLoadBalancerToAttachOnUpdate = loadBalancer; } return this; } @Override public VirtualMachineScaleSetImpl withPrimaryInternalLoadBalancerBackends(String... backendNames) { initVMProfileIfNecessary(); if (this.isInCreateMode()) { VirtualMachineScaleSetIpConfiguration defaultPrimaryIpConfig = primaryNicDefaultIpConfiguration(); removeAllBackendAssociationFromIpConfiguration(this.primaryInternalLoadBalancer, defaultPrimaryIpConfig); associateBackEndsToIpConfiguration( this.primaryInternalLoadBalancer.id(), defaultPrimaryIpConfig, backendNames); } else { addToList(this.primaryInternalLBBackendsToAddOnUpdate, backendNames); } return this; } @Override public VirtualMachineScaleSetImpl withPrimaryInternalLoadBalancerInboundNatPools(String... natPoolNames) { initVMProfileIfNecessary(); if (this.isInCreateMode()) { VirtualMachineScaleSetIpConfiguration defaultPrimaryIpConfig = this.primaryNicDefaultIpConfiguration(); removeAllInboundNatPoolAssociationFromIpConfiguration( this.primaryInternalLoadBalancer, defaultPrimaryIpConfig); associateInboundNATPoolsToIpConfiguration( this.primaryInternalLoadBalancer.id(), defaultPrimaryIpConfig, natPoolNames); } else { addToList(this.primaryInternalLBInboundNatPoolsToAddOnUpdate, natPoolNames); } return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternalLoadBalancer() { if (this.isInUpdateMode()) { this.removePrimaryInternalLoadBalancerOnUpdate = true; } return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternetFacingLoadBalancer() { if (this.isInUpdateMode()) { this.removePrimaryInternetFacingLoadBalancerOnUpdate = true; } return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternetFacingLoadBalancerBackends(String... backendNames) { addToList(this.primaryInternetFacingLBBackendsToRemoveOnUpdate, backendNames); return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternalLoadBalancerBackends(String... backendNames) { addToList(this.primaryInternalLBBackendsToRemoveOnUpdate, backendNames); return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternetFacingLoadBalancerNatPools(String... natPoolNames) { addToList(this.primaryInternetFacingLBInboundNatPoolsToRemoveOnUpdate, natPoolNames); return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternalLoadBalancerNatPools(String... 
natPoolNames) { addToList(this.primaryInternalLBInboundNatPoolsToRemoveOnUpdate, natPoolNames); return this; } @Override public VirtualMachineScaleSetImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) { return withSpecificWindowsImageVersion(knownImage.imageReference()); } @Override public VirtualMachineScaleSetImpl withLatestWindowsImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference().withPublisher(publisher).withOffer(offer).withSku(sku).withVersion("latest"); return withSpecificWindowsImageVersion(imageReference); } @Override public VirtualMachineScaleSetImpl withSpecificWindowsImageVersion(ImageReference imageReference) { initVMProfileIfNecessary(); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().withImageReference(imageReference); this.innerModel().virtualMachineProfile().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineScaleSetImpl withGeneralizedWindowsCustomImage(String customImageId) { initVMProfileIfNecessary(); ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().withImageReference(imageReferenceInner); this.innerModel().virtualMachineProfile().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineScaleSetImpl withSpecializedWindowsCustomImage(String customImageId) { this.withGeneralizedWindowsCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineScaleSetImpl withStoredWindowsImage(String imageUrl) { initVMProfileIfNecessary(); VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS); this.innerModel().virtualMachineProfile().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineScaleSetImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) { return withSpecificLinuxImageVersion(knownImage.imageReference()); } @Override public VirtualMachineScaleSetImpl withLatestLinuxImage(String publisher, String offer, String sku) { ImageReference imageReference = new 
ImageReference().withPublisher(publisher).withOffer(offer).withSku(sku).withVersion("latest"); return withSpecificLinuxImageVersion(imageReference); } @Override public VirtualMachineScaleSetImpl withSpecificLinuxImageVersion(ImageReference imageReference) { initVMProfileIfNecessary(); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().withImageReference(imageReference); this.innerModel().virtualMachineProfile().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineScaleSetImpl withGeneralizedLinuxCustomImage(String customImageId) { initVMProfileIfNecessary(); ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().withImageReference(imageReferenceInner); this.innerModel().virtualMachineProfile().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineScaleSetImpl withSpecializedLinuxCustomImage(String customImageId) { this.withGeneralizedLinuxCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineScaleSetImpl withStoredLinuxImage(String imageUrl) { initVMProfileIfNecessary(); VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX); this.innerModel().virtualMachineProfile().osProfile().withLinuxConfiguration(new LinuxConfiguration()); return this; } @Override public VirtualMachineScaleSetImpl withAdminUsername(String adminUserName) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withAdminUsername(adminUserName); return this; } @Override public VirtualMachineScaleSetImpl withRootUsername(String adminUserName) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withAdminUsername(adminUserName); return this; } @Override public VirtualMachineScaleSetImpl withAdminPassword(String password) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineScaleSetImpl withRootPassword(String password) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineScaleSetImpl withSsh(String publicKeyData) { initVMProfileIfNecessary(); VirtualMachineScaleSetOSProfile osProfile = this.innerModel().virtualMachineProfile().osProfile(); if (osProfile.linuxConfiguration().ssh() == null) { SshConfiguration sshConfiguration = new SshConfiguration(); sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>()); osProfile.linuxConfiguration().withSsh(sshConfiguration); } SshPublicKey sshPublicKey = new SshPublicKey(); sshPublicKey.withKeyData(publicKeyData); sshPublicKey.withPath("/home/" + osProfile.adminUsername() + 
"/.ssh/authorized_keys"); osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey); return this; } @Override public VirtualMachineScaleSetImpl withVMAgent() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(true); return this; } @Override public VirtualMachineScaleSetImpl withoutVMAgent() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(false); return this; } @Override public VirtualMachineScaleSetImpl withAutoUpdate() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineScaleSetImpl withoutAutoUpdate() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false); return this; } @Override public VirtualMachineScaleSetImpl withTimeZone(String timeZone) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withTimeZone(timeZone); return this; } @Override public VirtualMachineScaleSetImpl withWinRM(WinRMListener listener) { initVMProfileIfNecessary(); if (this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().winRM() == null) { WinRMConfiguration winRMConfiguration = new WinRMConfiguration(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withWinRM(winRMConfiguration); } this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().winRM().listeners().add(listener); return this; } @Override public VirtualMachineScaleSetImpl withOSDiskCaching(CachingTypes cachingType) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withCaching(cachingType); return this; } @Override public VirtualMachineScaleSetImpl withOSDiskName(String name) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withName(name); return this; } @Override public VirtualMachineScaleSetImpl withComputerNamePrefix(String namePrefix) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withComputerNamePrefix(namePrefix); return this; } @Override public VirtualMachineScaleSetImpl withUpgradeMode(UpgradeMode upgradeMode) { if (this.innerModel().upgradePolicy() == null) { this.innerModel().withUpgradePolicy(new UpgradePolicy()); } this.innerModel().upgradePolicy().withMode(upgradeMode); return this; } @Override public VirtualMachineScaleSetImpl withOverProvision(boolean enabled) { this.innerModel().withOverprovision(enabled); return this; } @Override public VirtualMachineScaleSetImpl withOverProvisioning() { return this.withOverProvision(true); } @Override public VirtualMachineScaleSetImpl withoutOverProvisioning() { return this.withOverProvision(false); } @Override public VirtualMachineScaleSetImpl withCapacity(long capacity) { this.innerModel().sku().withCapacity(capacity); return this; } @Override public VirtualMachineScaleSetImpl withNewStorageAccount(String name) { StorageAccount.DefinitionStages.WithGroup definitionWithGroup = this.storageManager.storageAccounts().define(name).withRegion(this.regionName()); Creatable<StorageAccount> definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = 
definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return withNewStorageAccount(definitionAfterGroup); } @Override public VirtualMachineScaleSetImpl withNewStorageAccount(Creatable<StorageAccount> creatable) { this.creatableStorageAccountKeys.add(this.addDependency(creatable)); return this; } @Override public VirtualMachineScaleSetImpl withExistingStorageAccount(StorageAccount storageAccount) { this.existingStorageAccountsToAssociate.add(storageAccount); return this; } @Override public VirtualMachineScaleSetImpl withCustomData(String base64EncodedCustomData) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withCustomData(base64EncodedCustomData); return this; } @Override public VirtualMachineScaleSetImpl withSecrets(List<VaultSecretGroup> secrets) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withSecrets(secrets); return this; } @Override public VirtualMachineScaleSetImpl withoutSecrets() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withSecrets(new ArrayList<VaultSecretGroup>()); return this; } @Override public VirtualMachineScaleSetExtensionImpl defineNewExtension(String name) { return new VirtualMachineScaleSetExtensionImpl(new VirtualMachineScaleSetExtensionInner().withName(name), this); } protected VirtualMachineScaleSetImpl withExtension(VirtualMachineScaleSetExtensionImpl extension) { this.extensions.put(extension.name(), extension); return this; } @Override public VirtualMachineScaleSetExtensionImpl updateExtension(String name) { return (VirtualMachineScaleSetExtensionImpl) this.extensions.get(name); } @Override public VirtualMachineScaleSetImpl withoutExtension(String name) { if (this.extensions.containsKey(name)) { this.extensions.remove(name); } return this; } @Override public boolean isManagedDiskEnabled() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null) { return false; } VirtualMachineScaleSetStorageProfile storageProfile = this.innerModel().virtualMachineProfile().storageProfile(); if (isOsDiskFromCustomImage(storageProfile)) { return true; } if (isOSDiskFromStoredImage(storageProfile)) { return false; } if (isOSDiskFromPlatformImage(storageProfile)) { if (this.isUnmanagedDiskSelected) { return false; } } if (isInCreateMode()) { return true; } else { List<String> vhdContainers = storageProfile.osDisk().vhdContainers(); return vhdContainers == null || vhdContainers.size() == 0; } } @Override public boolean isManagedServiceIdentityEnabled() { ResourceIdentityType type = this.managedServiceIdentityType(); return type != null && !type.equals(ResourceIdentityType.NONE); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().tenantId(); } return null; } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().principalId(); } return null; } @Override public ResourceIdentityType managedServiceIdentityType() { if (this.innerModel().identity() != null) { return this.innerModel().identity().type(); } return null; } @Override public Set<String> userAssignedManagedServiceIdentityIds() { if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) { return Collections .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet())); } return 
Collections.unmodifiableSet(new HashSet<String>()); } @Override public Set<AvailabilityZoneId> availabilityZones() { Set<AvailabilityZoneId> zones = new HashSet<>(); if (this.innerModel().zones() != null) { for (String zone : this.innerModel().zones()) { zones.add(AvailabilityZoneId.fromString(zone)); } } return Collections.unmodifiableSet(zones); } @Override public boolean isBootDiagnosticsEnabled() { return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled(); } @Override public String bootDiagnosticsStorageUri() { return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri(); } @Override public StorageAccountTypes managedOSDiskStorageAccountType() { if (this.innerModel().virtualMachineProfile() != null && this.innerModel().virtualMachineProfile().storageProfile() != null && this.innerModel().virtualMachineProfile().storageProfile().osDisk() != null && this.innerModel().virtualMachineProfile().storageProfile().osDisk().managedDisk() != null) { return this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .managedDisk() .storageAccountType(); } return null; } @Override public VirtualMachineScaleSetImpl withUnmanagedDisks() { this.isUnmanagedDiskSelected = true; return this; } @Override public VirtualMachineScaleSetImpl withNewDataDisk(int sizeInGB) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .implicitDisksToAssociate .add(new VirtualMachineScaleSetDataDisk().withLun(-1).withDiskSizeGB(sizeInGB)); return this; } @Override public VirtualMachineScaleSetImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .implicitDisksToAssociate .add(new VirtualMachineScaleSetDataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineScaleSetImpl withNewDataDisk( int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); VirtualMachineScaleSetManagedDiskParameters managedDiskParameters = new VirtualMachineScaleSetManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .implicitDisksToAssociate .add( new VirtualMachineScaleSetDataDisk() .withLun(lun) .withDiskSizeGB(sizeInGB) .withCaching(cachingType) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineScaleSetImpl withoutDataDisk(int lun) { if (!isManagedDiskEnabled()) { return this; } this.managedDataDisks.diskLunsToRemove.add(lun); return this; } /* TODO: Broken by change in Azure API behavior @Override public VirtualMachineScaleSetImpl withDataDiskUpdated(int lun, int newSizeInGB) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_NO_MANAGED_DISK_TO_UPDATE); VirtualMachineScaleSetDataDisk dataDisk = getDataDiskInner(lun); if (dataDisk == null) { throw new RuntimeException(String.format("A data disk with lun '%d' not found", lun)); } dataDisk .withDiskSizeGB(newSizeInGB); return this; } @Override public VirtualMachineScaleSetImpl withDataDiskUpdated(int lun, int newSizeInGB, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_NO_MANAGED_DISK_TO_UPDATE); VirtualMachineScaleSetDataDisk dataDisk = getDataDiskInner(lun); if (dataDisk == null) { throw new 
RuntimeException(String.format("A data disk with lun '%d' not found", lun)); } dataDisk .withDiskSizeGB(newSizeInGB) .withCaching(cachingType); return this; } @Override public VirtualMachineScaleSetImpl withDataDiskUpdated(int lun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_NO_MANAGED_DISK_TO_UPDATE); VirtualMachineScaleSetDataDisk dataDisk = getDataDiskInner(lun); if (dataDisk == null) { throw new RuntimeException(String.format("A data disk with lun '%d' not found", lun)); } dataDisk .withDiskSizeGB(newSizeInGB) .withCaching(cachingType) .managedDisk() .withStorageAccountType(storageAccountType); return this; } private VirtualMachineScaleSetDataDisk getDataDiskInner(int lun) { VirtualMachineScaleSetStorageProfile storageProfile = this .inner() .virtualMachineProfile() .storageProfile(); List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile .dataDisks(); if (dataDisks == null) { return null; } for (VirtualMachineScaleSetDataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { return dataDisk; } } return null; } */ @Override public VirtualMachineScaleSetImpl withNewDataDiskFromImage(int imageLun) { this.managedDataDisks.newDisksFromImage.add(new VirtualMachineScaleSetDataDisk().withLun(imageLun)); return this; } @Override public VirtualMachineScaleSetImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, CachingTypes cachingType) { this .managedDataDisks .newDisksFromImage .add( new VirtualMachineScaleSetDataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withCaching(cachingType)); return this; } @Override public VirtualMachineScaleSetImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) { VirtualMachineScaleSetManagedDiskParameters managedDiskParameters = new VirtualMachineScaleSetManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .newDisksFromImage .add( new VirtualMachineScaleSetDataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineScaleSetImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) { initVMProfileIfNecessary(); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withManagedDisk(new VirtualMachineScaleSetManagedDiskParameters().withStorageAccountType(accountType)); return this; } @Override public VirtualMachineScaleSetImpl withDataDiskDefaultCachingType(CachingTypes cachingType) { this.managedDataDisks.setDefaultCachingType(cachingType); return this; } @Override public VirtualMachineScaleSetImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) { this.managedDataDisks.setDefaultStorageAccountType(storageAccountType); return this; } @Override public VirtualMachineScaleSetImpl withSystemAssignedManagedServiceIdentity() { this.virtualMachineScaleSetMsiHandler.withLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineScaleSetImpl withoutSystemAssignedManagedServiceIdentity() { this.virtualMachineScaleSetMsiHandler.withoutLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineScaleSetImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) { this.virtualMachineScaleSetMsiHandler.withAccessTo(resourceId, role); return this; } @Override public VirtualMachineScaleSetImpl 
withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole asRole) { this.virtualMachineScaleSetMsiHandler.withAccessToCurrentResourceGroup(asRole); return this; } @Override public VirtualMachineScaleSetImpl withSystemAssignedIdentityBasedAccessTo(String scope, String roleDefinitionId) { this.virtualMachineScaleSetMsiHandler.withAccessTo(scope, roleDefinitionId); return this; } @Override public VirtualMachineScaleSetImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup( String roleDefinitionId) { this.virtualMachineScaleSetMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId); return this; } @Override public VirtualMachineScaleSetImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) { this.virtualMachineScaleSetMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity); return this; } @Override public VirtualMachineScaleSetImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) { this.virtualMachineScaleSetMsiHandler.withExistingExternalManagedServiceIdentity(identity); return this; } @Override public VirtualMachineScaleSetImpl withoutUserAssignedManagedServiceIdentity(String identityId) { this.virtualMachineScaleSetMsiHandler.withoutExternalManagedServiceIdentity(identityId); return this; } @Override protected void beforeCreating() { setExtensions(); } @Override protected Mono<VirtualMachineScaleSetInner> createInner() { if (this.orchestrationMode() == OrchestrationMode.FLEXIBLE && this.innerModel().sku() == null) { return createInnerNoProfile(); } if (this.shouldSetProfileDefaults()) { this.setOSProfileDefaults(); this.setOSDiskDefault(); } this.setPrimaryIpConfigurationSubnet(); return this .setPrimaryIpConfigurationBackendsAndInboundNatPoolsAsync() .flatMap( virtualMachineScaleSet -> { if (isManagedDiskEnabled()) { this.managedDataDisks.setDataDisksDefaults(); } else { List<VirtualMachineScaleSetDataDisk> dataDisks = this.innerModel().virtualMachineProfile().storageProfile().dataDisks(); VirtualMachineScaleSetUnmanagedDataDiskImpl.setDataDisksDefaults(dataDisks, this.name()); } this.handleUnManagedOSDiskContainers(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.virtualMachineScaleSetMsiHandler.processCreatedExternalIdentities(); this.virtualMachineScaleSetMsiHandler.handleExternalIdentities(); this.createNewProximityPlacementGroup(); this.adjustProfileForFlexibleMode(); return this .manager() .serviceClient() .getVirtualMachineScaleSets() .createOrUpdateAsync(resourceGroupName(), name(), innerModel()); }); } @Override protected void afterCreating() { this.clearCachedProperties(); this.virtualMachineScaleSetMsiHandler.clear(); } @Override public Mono<VirtualMachineScaleSet> updateResourceAsync() { final VirtualMachineScaleSetImpl self = this; if (this.orchestrationMode() == OrchestrationMode.FLEXIBLE && this.innerModel().virtualMachineProfile() == null) { return updateResourceAsyncNoProfile(self); } setExtensions(); if (this.shouldSetProfileDefaults()) { this.setOSProfileDefaults(); this.setOSDiskDefault(); } this.setPrimaryIpConfigurationSubnet(); return this .setPrimaryIpConfigurationBackendsAndInboundNatPoolsAsync() .map( virtualMachineScaleSet -> { if (isManagedDiskEnabled()) { this.managedDataDisks.setDataDisksDefaults(); } else if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { List<VirtualMachineScaleSetDataDisk> dataDisks = this.innerModel().virtualMachineProfile().storageProfile().dataDisks(); 
VirtualMachineScaleSetUnmanagedDataDiskImpl.setDataDisksDefaults(dataDisks, this.name()); } this.handleUnManagedOSDiskContainers(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.virtualMachineScaleSetMsiHandler.processCreatedExternalIdentities(); this.adjustProfileForFlexibleMode(); VirtualMachineScaleSetUpdate updateParameter = VMSSPatchPayload.preparePatchPayload(this); this.virtualMachineScaleSetMsiHandler.handleExternalIdentities(updateParameter); return updateParameter; }) .flatMap( updateParameter -> this .manager() .serviceClient() .getVirtualMachineScaleSets() .updateAsync(resourceGroupName(), name(), updateParameter) .map( vmssInner -> { setInner(vmssInner); self.clearCachedProperties(); self.initializeChildrenFromInner(); self.virtualMachineScaleSetMsiHandler.clear(); return self; })); } @Override public Mono<VirtualMachineScaleSet> refreshAsync() { return super .refreshAsync() .map( scaleSet -> { VirtualMachineScaleSetImpl impl = (VirtualMachineScaleSetImpl) scaleSet; impl.clearCachedProperties(); impl.initializeChildrenFromInner(); return impl; }); } @Override protected Mono<VirtualMachineScaleSetInner> getInnerAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private void adjustProfileForFlexibleMode() { if (this.orchestrationMode() == OrchestrationMode.FLEXIBLE) { if (this.innerModel().virtualMachineProfile().networkProfile().networkInterfaceConfigurations() != null) { this.innerModel().virtualMachineProfile().networkProfile().networkInterfaceConfigurations().forEach(virtualMachineScaleSetNetworkConfiguration -> { if (virtualMachineScaleSetNetworkConfiguration.ipConfigurations() != null) { virtualMachineScaleSetNetworkConfiguration.ipConfigurations().forEach(virtualMachineScaleSetIpConfiguration -> { virtualMachineScaleSetIpConfiguration.withLoadBalancerInboundNatPools(null); }); } }); } this.innerModel() .withUpgradePolicy(null) .virtualMachineProfile().networkProfile() .withNetworkApiVersion(NetworkApiVersion.TWO_ZERO_TWO_ZERO_ONE_ONE_ZERO_ONE); } } private Mono<VirtualMachineScaleSetInner> createInnerNoProfile() { this.innerModel().withVirtualMachineProfile(null); return manager() .serviceClient() .getVirtualMachineScaleSets() .createOrUpdateAsync(resourceGroupName(), name(), innerModel()); } private Mono<VirtualMachineScaleSet> updateResourceAsyncNoProfile(VirtualMachineScaleSetImpl self) { return manager() .serviceClient() .getVirtualMachineScaleSets() .updateAsync(resourceGroupName(), name(), VMSSPatchPayload.preparePatchPayload(this)) .map( vmssInner -> { setInner(vmssInner); self.clearCachedProperties(); self.initializeChildrenFromInner(); self.virtualMachineScaleSetMsiHandler.clear(); return self; }); } private void initVMProfileIfNecessary() { if (this.innerModel().virtualMachineProfile() == null) { this.innerModel().withVirtualMachineProfile(initDefaultVMProfile()); this.profileAttached = true; } } private VirtualMachineScaleSetVMProfile initDefaultVMProfile() { VirtualMachineScaleSetImpl impl = (VirtualMachineScaleSetImpl) this.manager() .virtualMachineScaleSets() .define(this.name()); if (this.orchestrationMode() == OrchestrationMode.FLEXIBLE) { if (this.innerModel().platformFaultDomainCount() != null) { impl.withFlexibleOrchestrationMode(this.innerModel().platformFaultDomainCount()); } else { impl.withFlexibleOrchestrationMode(); } } return impl.innerModel().virtualMachineProfile(); } private boolean isInUpdateMode() { return !this.isInCreateMode(); } 
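/* Illustrative sketch, not part of the original source: initVMProfileIfNecessary() above is a lazy-initialization
 * guard -- each fluent setter first ensures the nested virtualMachineProfile exists before mutating it, and records
 * (via profileAttached) that defaults still need to be applied later. A minimal standalone version of the same
 * pattern, using hypothetical Model/Profile types rather than the real Azure inner models:
 *
 *   final class Model {
 *       private Profile profile;          // nested model, absent until first use
 *       private boolean profileAttached;  // true when a default profile was attached lazily
 *
 *       Model withComputerNamePrefix(String prefix) {
 *           initProfileIfNecessary();             // guard: create the nested model on first access
 *           profile.computerNamePrefix = prefix;  // then mutate it safely
 *           return this;                          // fluent style, mirroring the with* methods in this class
 *       }
 *
 *       private void initProfileIfNecessary() {
 *           if (profile == null) {                // only build the default once
 *               profile = new Profile();
 *               profileAttached = true;           // later consulted when deciding whether to set profile defaults
 *           }
 *       }
 *   }
 *
 *   final class Profile { String computerNamePrefix; }
 */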
private void setOSProfileDefaults() { if (this.innerModel().sku().capacity() == null) { this.withCapacity(2); } if (this.innerModel().upgradePolicy() == null || this.innerModel().upgradePolicy().mode() == null) { this.innerModel().withUpgradePolicy(new UpgradePolicy().withMode(UpgradeMode.AUTOMATIC)); } VirtualMachineScaleSetOSProfile osProfile = this.innerModel().virtualMachineProfile().osProfile(); VirtualMachineScaleSetOSDisk osDisk = this.innerModel().virtualMachineProfile().storageProfile().osDisk(); if (!removeOsProfile && isOSDiskFromImage(osDisk)) { if (this.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) { if (osProfile.linuxConfiguration() == null) { osProfile.withLinuxConfiguration(new LinuxConfiguration()); } osProfile.linuxConfiguration().withDisablePasswordAuthentication(osProfile.adminPassword() == null); } if (this.computerNamePrefix() == null) { if (this.name().matches("[0-9]+")) { withComputerNamePrefix(this.namer.getRandomName("vmss-vm", 12)); } else if (this.name().length() <= 12) { withComputerNamePrefix(this.name() + "-vm"); } else { withComputerNamePrefix(this.namer.getRandomName("vmss-vm", 12)); } } } else { this.innerModel().virtualMachineProfile().withOsProfile(null); } } private void setOSDiskDefault() { VirtualMachineScaleSetStorageProfile storageProfile = this.innerModel().virtualMachineProfile().storageProfile(); VirtualMachineScaleSetOSDisk osDisk = storageProfile.osDisk(); if (isOSDiskFromImage(osDisk)) { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() == null) { osDisk.withManagedDisk(new VirtualMachineScaleSetManagedDiskParameters()); } if (osDisk.managedDisk().storageAccountType() == null) { osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS); } osDisk.withVhdContainers(null); } else { osDisk.withManagedDisk(null); if (osDisk.name() == null) { withOSDiskName(this.name() + "-os-disk"); } } } if (this.osDiskCachingType() == null) { withOSDiskCaching(CachingTypes.READ_WRITE); } } /* * Profile defaults should be set when: * 1. creating vmss * 2. 
attaching a profile to existing flexible vmss * @return */ private boolean shouldSetProfileDefaults() { return isInCreateMode() || (this.orchestrationMode() == OrchestrationMode.FLEXIBLE && this.profileAttached); } private void setExtensions() { if (this.extensions.size() > 0 && this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { this .innerModel() .virtualMachineProfile() .withExtensionProfile(new VirtualMachineScaleSetExtensionProfile()) .extensionProfile() .withExtensions(innersFromWrappers(this.extensions.values())); } } @Override public void beforeGroupCreateOrUpdate() { this.prepareOSDiskContainers(); this.bootDiagnosticsHandler.prepare(); } protected void prepareOSDiskContainers() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null || isManagedDiskEnabled()) { return; } final VirtualMachineScaleSetStorageProfile storageProfile = innerModel().virtualMachineProfile().storageProfile(); if (isOSDiskFromStoredImage(storageProfile)) { return; } if (this.isInCreateMode() && this.creatableStorageAccountKeys.isEmpty() && this.existingStorageAccountsToAssociate.isEmpty()) { String accountName = this.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.creatableGroup != null) { storageAccountCreatable = this .storageManager .storageAccounts() .define(accountName) .withRegion(this.regionName()) .withNewResourceGroup(this.creatableGroup); } else { storageAccountCreatable = this .storageManager .storageAccounts() .define(accountName) .withRegion(this.regionName()) .withExistingResourceGroup(this.resourceGroupName()); } this.creatableStorageAccountKeys.add(this.addDependency(storageAccountCreatable)); } } private void handleUnManagedOSDiskContainers() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null) { return; } final VirtualMachineScaleSetStorageProfile storageProfile = innerModel().virtualMachineProfile().storageProfile(); if (isManagedDiskEnabled()) { storageProfile.osDisk().withVhdContainers(null); return; } if (isOSDiskFromStoredImage(storageProfile)) { storageProfile.osDisk().vhdContainers().clear(); return; } String containerName = null; for (String containerUrl : storageProfile.osDisk().vhdContainers()) { containerName = containerUrl.substring(containerUrl.lastIndexOf("/") + 1); break; } if (containerName == null) { containerName = "vhds"; } if (isInCreateMode() && this.creatableStorageAccountKeys.isEmpty() && this.existingStorageAccountsToAssociate.isEmpty()) { throw logger .logExceptionAsError( new IllegalStateException("Expected storage account(s) for VMSS OS disk containers not found")); } for (String storageAccountKey : this.creatableStorageAccountKeys) { StorageAccount storageAccount = this.<StorageAccount>taskResult(storageAccountKey); storageProfile .osDisk() .vhdContainers() .add(mergePath(storageAccount.endPoints().primary().blob(), containerName)); } for (StorageAccount storageAccount : this.existingStorageAccountsToAssociate) { storageProfile .osDisk() .vhdContainers() .add(mergePath(storageAccount.endPoints().primary().blob(), containerName)); } this.creatableStorageAccountKeys.clear(); this.existingStorageAccountsToAssociate.clear(); } private void setPrimaryIpConfigurationSubnet() { if (isInUpdateMode()) { return; } initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration ipConfig = this.primaryNicDefaultIpConfiguration(); ipConfig.withSubnet(new 
ApiEntityReference().withId(this.existingPrimaryNetworkSubnetNameToAssociate)); this.existingPrimaryNetworkSubnetNameToAssociate = null; } private Mono<VirtualMachineScaleSetImpl> setPrimaryIpConfigurationBackendsAndInboundNatPoolsAsync() { if (isInCreateMode()) { return Mono.just(this); } try { return this .loadCurrentPrimaryLoadBalancersIfAvailableAsync() .map( virtualMachineScaleSet -> { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration primaryIpConfig = primaryNicDefaultIpConfiguration(); if (this.primaryInternetFacingLoadBalancer != null) { removeBackendsFromIpConfiguration( this.primaryInternetFacingLoadBalancer.id(), primaryIpConfig, this.primaryInternetFacingLBBackendsToRemoveOnUpdate.toArray(new String[0])); associateBackEndsToIpConfiguration( primaryInternetFacingLoadBalancer.id(), primaryIpConfig, this.primaryInternetFacingLBBackendsToAddOnUpdate.toArray(new String[0])); removeInboundNatPoolsFromIpConfiguration( this.primaryInternetFacingLoadBalancer.id(), primaryIpConfig, this.primaryInternetFacingLBInboundNatPoolsToRemoveOnUpdate.toArray(new String[0])); associateInboundNATPoolsToIpConfiguration( primaryInternetFacingLoadBalancer.id(), primaryIpConfig, this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate.toArray(new String[0])); } if (this.primaryInternalLoadBalancer != null) { removeBackendsFromIpConfiguration( this.primaryInternalLoadBalancer.id(), primaryIpConfig, this.primaryInternalLBBackendsToRemoveOnUpdate.toArray(new String[0])); associateBackEndsToIpConfiguration( primaryInternalLoadBalancer.id(), primaryIpConfig, this.primaryInternalLBBackendsToAddOnUpdate.toArray(new String[0])); removeInboundNatPoolsFromIpConfiguration( this.primaryInternalLoadBalancer.id(), primaryIpConfig, this.primaryInternalLBInboundNatPoolsToRemoveOnUpdate.toArray(new String[0])); associateInboundNATPoolsToIpConfiguration( primaryInternalLoadBalancer.id(), primaryIpConfig, this.primaryInternalLBInboundNatPoolsToAddOnUpdate.toArray(new String[0])); } if (this.removePrimaryInternetFacingLoadBalancerOnUpdate) { if (this.primaryInternetFacingLoadBalancer != null) { removeLoadBalancerAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancer, primaryIpConfig); } } if (this.removePrimaryInternalLoadBalancerOnUpdate) { if (this.primaryInternalLoadBalancer != null) { removeLoadBalancerAssociationFromIpConfiguration( this.primaryInternalLoadBalancer, primaryIpConfig); } } if (this.primaryInternetFacingLoadBalancerToAttachOnUpdate != null) { if (this.primaryInternetFacingLoadBalancer != null) { removeLoadBalancerAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancer, primaryIpConfig); } associateLoadBalancerToIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate, primaryIpConfig); if (!this.primaryInternetFacingLBBackendsToAddOnUpdate.isEmpty()) { removeAllBackendAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate, primaryIpConfig); associateBackEndsToIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate.id(), primaryIpConfig, this.primaryInternetFacingLBBackendsToAddOnUpdate.toArray(new String[0])); } if (!this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate.isEmpty()) { removeAllInboundNatPoolAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate, primaryIpConfig); associateInboundNATPoolsToIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate.id(), primaryIpConfig, 
this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate.toArray(new String[0])); } } if (this.primaryInternalLoadBalancerToAttachOnUpdate != null) { if (this.primaryInternalLoadBalancer != null) { removeLoadBalancerAssociationFromIpConfiguration( this.primaryInternalLoadBalancer, primaryIpConfig); } associateLoadBalancerToIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate, primaryIpConfig); if (!this.primaryInternalLBBackendsToAddOnUpdate.isEmpty()) { removeAllBackendAssociationFromIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate, primaryIpConfig); associateBackEndsToIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate.id(), primaryIpConfig, this.primaryInternalLBBackendsToAddOnUpdate.toArray(new String[0])); } if (!this.primaryInternalLBInboundNatPoolsToAddOnUpdate.isEmpty()) { removeAllInboundNatPoolAssociationFromIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate, primaryIpConfig); associateInboundNATPoolsToIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate.id(), primaryIpConfig, this.primaryInternalLBInboundNatPoolsToAddOnUpdate.toArray(new String[0])); } } this.removePrimaryInternetFacingLoadBalancerOnUpdate = false; this.removePrimaryInternalLoadBalancerOnUpdate = false; this.primaryInternetFacingLoadBalancerToAttachOnUpdate = null; this.primaryInternalLoadBalancerToAttachOnUpdate = null; this.primaryInternetFacingLBBackendsToRemoveOnUpdate.clear(); this.primaryInternetFacingLBInboundNatPoolsToRemoveOnUpdate.clear(); this.primaryInternalLBBackendsToRemoveOnUpdate.clear(); this.primaryInternalLBInboundNatPoolsToRemoveOnUpdate.clear(); this.primaryInternetFacingLBBackendsToAddOnUpdate.clear(); this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate.clear(); this.primaryInternalLBBackendsToAddOnUpdate.clear(); this.primaryInternalLBInboundNatPoolsToAddOnUpdate.clear(); return this; }); } catch (IOException ioException) { throw logger.logExceptionAsError(new RuntimeException(ioException)); } } private void clearCachedProperties() { this.primaryInternetFacingLoadBalancer = null; this.primaryInternalLoadBalancer = null; this.profileAttached = false; } private Mono<VirtualMachineScaleSetImpl> loadCurrentPrimaryLoadBalancersIfAvailableAsync() throws IOException { Mono<VirtualMachineScaleSetImpl> self = Mono.just(this); if (this.primaryInternetFacingLoadBalancer != null && this.primaryInternalLoadBalancer != null) { return self; } String firstLoadBalancerId = null; VirtualMachineScaleSetIpConfiguration ipConfig = primaryNicDefaultIpConfiguration(); if (ipConfig == null) { return self; } if (!ipConfig.loadBalancerBackendAddressPools().isEmpty()) { firstLoadBalancerId = ResourceUtils.parentResourceIdFromResourceId(ipConfig.loadBalancerBackendAddressPools().get(0).id()); } if (firstLoadBalancerId == null && !ipConfig.loadBalancerInboundNatPools().isEmpty()) { firstLoadBalancerId = ResourceUtils.parentResourceIdFromResourceId(ipConfig.loadBalancerInboundNatPools().get(0).id()); } if (firstLoadBalancerId == null) { return self; } self = self .concatWith( Mono .just(firstLoadBalancerId) .flatMap( id -> this .networkManager .loadBalancers() .getByIdAsync(id) .map( loadBalancer1 -> { if (loadBalancer1.publicIpAddressIds() != null && loadBalancer1.publicIpAddressIds().size() > 0) { this.primaryInternetFacingLoadBalancer = loadBalancer1; } else { this.primaryInternalLoadBalancer = loadBalancer1; } return this; }))) .last(); String secondLoadBalancerId = null; for (SubResource subResource : 
ipConfig.loadBalancerBackendAddressPools()) { if (!subResource.id().toLowerCase(Locale.ROOT).startsWith(firstLoadBalancerId.toLowerCase(Locale.ROOT))) { secondLoadBalancerId = ResourceUtils.parentResourceIdFromResourceId(subResource.id()); break; } } if (secondLoadBalancerId == null) { for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (!subResource .id() .toLowerCase(Locale.ROOT) .startsWith(firstLoadBalancerId.toLowerCase(Locale.ROOT))) { secondLoadBalancerId = ResourceUtils.parentResourceIdFromResourceId(subResource.id()); break; } } } if (secondLoadBalancerId == null) { return self; } return self .concatWith( Mono .just(secondLoadBalancerId) .flatMap( id -> networkManager .loadBalancers() .getByIdAsync(id) .map( loadBalancer2 -> { if (loadBalancer2.publicIpAddressIds() != null && loadBalancer2.publicIpAddressIds().size() > 0) { this.primaryInternetFacingLoadBalancer = loadBalancer2; } else { this.primaryInternalLoadBalancer = loadBalancer2; } return this; }))) .last(); } private VirtualMachineScaleSetIpConfiguration primaryNicDefaultIpConfiguration() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null) { return null; } List<VirtualMachineScaleSetNetworkConfiguration> nicConfigurations = this.innerModel().virtualMachineProfile().networkProfile().networkInterfaceConfigurations(); for (VirtualMachineScaleSetNetworkConfiguration nicConfiguration : nicConfigurations) { if (nicConfiguration.primary()) { if (nicConfiguration.ipConfigurations().size() > 0) { VirtualMachineScaleSetIpConfiguration ipConfig = nicConfiguration.ipConfigurations().get(0); if (ipConfig.loadBalancerBackendAddressPools() == null) { ipConfig.withLoadBalancerBackendAddressPools(new ArrayList<>()); } if (ipConfig.loadBalancerInboundNatPools() == null) { ipConfig.withLoadBalancerInboundNatPools(new ArrayList<>()); } return ipConfig; } } } throw logger .logExceptionAsError( new RuntimeException("Could not find the primary nic configuration or an IP configuration in it")); } private VirtualMachineScaleSetNetworkConfiguration primaryNicConfiguration() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null) { return null; } List<VirtualMachineScaleSetNetworkConfiguration> nicConfigurations = this.innerModel().virtualMachineProfile().networkProfile().networkInterfaceConfigurations(); for (VirtualMachineScaleSetNetworkConfiguration nicConfiguration : nicConfigurations) { if (nicConfiguration.primary()) { return nicConfiguration; } } throw logger.logExceptionAsError(new RuntimeException("Could not find the primary nic configuration")); } private static void associateBackEndsToIpConfiguration( String loadBalancerId, VirtualMachineScaleSetIpConfiguration ipConfig, String... backendNames) { List<SubResource> backendSubResourcesToAssociate = new ArrayList<>(); for (String backendName : backendNames) { String backendPoolId = mergePath(loadBalancerId, "backendAddressPools", backendName); boolean found = false; for (SubResource subResource : ipConfig.loadBalancerBackendAddressPools()) { if (subResource.id().equalsIgnoreCase(backendPoolId)) { found = true; break; } } if (!found) { backendSubResourcesToAssociate.add(new SubResource().withId(backendPoolId)); } } for (SubResource backendSubResource : backendSubResourcesToAssociate) { ipConfig.loadBalancerBackendAddressPools().add(backendSubResource); } } private static void associateInboundNATPoolsToIpConfiguration( String loadBalancerId, VirtualMachineScaleSetIpConfiguration ipConfig, String... 
inboundNatPools) { List<SubResource> inboundNatPoolSubResourcesToAssociate = new ArrayList<>(); for (String inboundNatPool : inboundNatPools) { String inboundNatPoolId = mergePath(loadBalancerId, "inboundNatPools", inboundNatPool); boolean found = false; for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (subResource.id().equalsIgnoreCase(inboundNatPoolId)) { found = true; break; } } if (!found) { inboundNatPoolSubResourcesToAssociate.add(new SubResource().withId(inboundNatPoolId)); } } for (SubResource backendSubResource : inboundNatPoolSubResourcesToAssociate) { ipConfig.loadBalancerInboundNatPools().add(backendSubResource); } } private static Map<String, LoadBalancerBackend> getBackendsAssociatedWithIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { String loadBalancerId = loadBalancer.id(); Map<String, LoadBalancerBackend> attachedBackends = new HashMap<>(); Map<String, LoadBalancerBackend> lbBackends = loadBalancer.backends(); for (LoadBalancerBackend lbBackend : lbBackends.values()) { String backendId = mergePath(loadBalancerId, "backendAddressPools", lbBackend.name()); for (SubResource subResource : ipConfig.loadBalancerBackendAddressPools()) { if (subResource.id().equalsIgnoreCase(backendId)) { attachedBackends.put(lbBackend.name(), lbBackend); } } } return attachedBackends; } private static Map<String, LoadBalancerInboundNatPool> getInboundNatPoolsAssociatedWithIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { String loadBalancerId = loadBalancer.id(); Map<String, LoadBalancerInboundNatPool> attachedInboundNatPools = new HashMap<>(); Map<String, LoadBalancerInboundNatPool> lbInboundNatPools = loadBalancer.inboundNatPools(); for (LoadBalancerInboundNatPool lbInboundNatPool : lbInboundNatPools.values()) { String inboundNatPoolId = mergePath(loadBalancerId, "inboundNatPools", lbInboundNatPool.name()); for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (subResource.id().equalsIgnoreCase(inboundNatPoolId)) { attachedInboundNatPools.put(lbInboundNatPool.name(), lbInboundNatPool); } } } return attachedInboundNatPools; } private static void associateLoadBalancerToIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { Collection<LoadBalancerBackend> backends = loadBalancer.backends().values(); String[] backendNames = new String[backends.size()]; int i = 0; for (LoadBalancerBackend backend : backends) { backendNames[i] = backend.name(); i++; } associateBackEndsToIpConfiguration(loadBalancer.id(), ipConfig, backendNames); Collection<LoadBalancerInboundNatPool> inboundNatPools = loadBalancer.inboundNatPools().values(); String[] natPoolNames = new String[inboundNatPools.size()]; i = 0; for (LoadBalancerInboundNatPool inboundNatPool : inboundNatPools) { natPoolNames[i] = inboundNatPool.name(); i++; } associateInboundNATPoolsToIpConfiguration(loadBalancer.id(), ipConfig, natPoolNames); } private static void removeLoadBalancerAssociationFromIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { removeAllBackendAssociationFromIpConfiguration(loadBalancer, ipConfig); removeAllInboundNatPoolAssociationFromIpConfiguration(loadBalancer, ipConfig); } private static void removeAllBackendAssociationFromIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { List<SubResource> toRemove = new ArrayList<>(); for (SubResource subResource : 
ipConfig.loadBalancerBackendAddressPools()) { if (subResource .id() .toLowerCase(Locale.ROOT) .startsWith(loadBalancer.id().toLowerCase(Locale.ROOT) + "/")) { toRemove.add(subResource); } } for (SubResource subResource : toRemove) { ipConfig.loadBalancerBackendAddressPools().remove(subResource); } } private static void removeAllInboundNatPoolAssociationFromIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { List<SubResource> toRemove = new ArrayList<>(); for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (subResource .id() .toLowerCase(Locale.ROOT) .startsWith(loadBalancer.id().toLowerCase(Locale.ROOT) + "/")) { toRemove.add(subResource); } } for (SubResource subResource : toRemove) { ipConfig.loadBalancerInboundNatPools().remove(subResource); } } private static void removeBackendsFromIpConfiguration( String loadBalancerId, VirtualMachineScaleSetIpConfiguration ipConfig, String... backendNames) { List<SubResource> toRemove = new ArrayList<>(); for (String backendName : backendNames) { String backendPoolId = mergePath(loadBalancerId, "backendAddressPools", backendName); for (SubResource subResource : ipConfig.loadBalancerBackendAddressPools()) { if (subResource.id().equalsIgnoreCase(backendPoolId)) { toRemove.add(subResource); break; } } } for (SubResource subResource : toRemove) { ipConfig.loadBalancerBackendAddressPools().remove(subResource); } } private static void removeInboundNatPoolsFromIpConfiguration( String loadBalancerId, VirtualMachineScaleSetIpConfiguration ipConfig, String... inboundNatPoolNames) { List<SubResource> toRemove = new ArrayList<>(); for (String natPoolName : inboundNatPoolNames) { String inboundNatPoolId = mergePath(loadBalancerId, "inboundNatPools", natPoolName); for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (subResource.id().equalsIgnoreCase(inboundNatPoolId)) { toRemove.add(subResource); break; } } } for (SubResource subResource : toRemove) { ipConfig.loadBalancerInboundNatPools().remove(subResource); } } private static <T> void addToList(List<T> list, T[] items) { list.addAll(Arrays.asList(items)); } private static String mergePath(String... 
segments) { StringBuilder builder = new StringBuilder(); for (String segment : segments) { while (segment.length() > 1 && segment.endsWith("/")) { segment = segment.substring(0, segment.length() - 1); } if (segment.length() > 0) { builder.append(segment); builder.append("/"); } } String merged = builder.toString(); if (merged.endsWith("/")) { merged = merged.substring(0, merged.length() - 1); } return merged; } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } protected VirtualMachineScaleSetImpl withUnmanagedDataDisk( VirtualMachineScaleSetUnmanagedDataDiskImpl unmanagedDisk) { initVMProfileIfNecessary(); if (this.innerModel().virtualMachineProfile().storageProfile().dataDisks() == null) { this .innerModel() .virtualMachineProfile() .storageProfile() .withDataDisks(new ArrayList<VirtualMachineScaleSetDataDisk>()); } List<VirtualMachineScaleSetDataDisk> dataDisks = this.innerModel().virtualMachineProfile().storageProfile().dataDisks(); dataDisks.add(unmanagedDisk.innerModel()); return this; } @Override public VirtualMachineScaleSetImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<>()); } this.innerModel().zones().add(zoneId.toString()); return this; } /** * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]). * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is configured to use an image from PIR or a custom image */ private boolean isOSDiskFromImage(VirtualMachineScaleSetOSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE; } /** * Checks whether the OS disk is based on a CustomImage. * * <p>A custom image is represented by {@link VirtualMachineCustomImage}. * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on a custom image. */ private boolean isOsDiskFromCustomImage(VirtualMachineScaleSetStorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null; } /** * Checks whether the OS disk is based on a platform image (image in PIR). * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on a platform image. */ private boolean isOSDiskFromPlatformImage(VirtualMachineScaleSetStorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.publisher() != null && imageReference.offer() != null && imageReference.sku() != null && imageReference.version() != null; } /** * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature').
* * @param storageProfile the storage profile * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature') */ private boolean isOSDiskFromStoredImage(VirtualMachineScaleSetStorageProfile storageProfile) { VirtualMachineScaleSetOSDisk osDisk = storageProfile.osDisk(); return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null; } private void throwIfManagedDiskDisabled(String message) { if (!this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } @Override public VirtualMachineScaleSetImpl withBootDiagnosticsOnManagedStorageAccount() { this.bootDiagnosticsHandler.withBootDiagnostics(true); return this; } @Override public VirtualMachineScaleSetImpl withBootDiagnostics() { this.bootDiagnosticsHandler.withBootDiagnostics(false); return this; } @Override public VirtualMachineScaleSetImpl withBootDiagnostics(Creatable<StorageAccount> creatable) { this.bootDiagnosticsHandler.withBootDiagnostics(creatable); return this; } @Override public VirtualMachineScaleSetImpl withBootDiagnostics(StorageAccount storageAccount) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount); return this; } @Override public VirtualMachineScaleSetImpl withBootDiagnostics(String storageAccountBlobEndpointUri) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri); return this; } @Override public VirtualMachineScaleSetImpl withoutBootDiagnostics() { this.bootDiagnosticsHandler.withoutBootDiagnostics(); return this; } @Override public VirtualMachineScaleSetImpl withMaxPrice(Double maxPrice) { this.innerModel().virtualMachineProfile().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice)); return this; } @Override public VirtualMachineScaleSetImpl withVirtualMachinePriority(VirtualMachinePriorityTypes priority) { this.innerModel().virtualMachineProfile().withPriority(priority); return this; } @Override public VirtualMachineScaleSetImpl withLowPriorityVirtualMachine() { this.withVirtualMachinePriority(VirtualMachinePriorityTypes.LOW); return this; } @Override public VirtualMachineScaleSetImpl withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes policy) { this.withLowPriorityVirtualMachine(); this.innerModel().virtualMachineProfile().withEvictionPolicy(policy); return this; } @Override public VirtualMachineScaleSetImpl withSpotPriorityVirtualMachine() { this.withVirtualMachinePriority(VirtualMachinePriorityTypes.SPOT); return this; } @Override public VirtualMachineScaleSetImpl withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes policy) { this.withSpotPriorityVirtualMachine(); this.innerModel().virtualMachineProfile().withEvictionPolicy(policy); return this; } @Override public VirtualMachineScaleSetImpl withVirtualMachinePublicIp() { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); if (nicIpConfig.publicIpAddressConfiguration() != null) { return this; } else { VirtualMachineScaleSetPublicIpAddressConfiguration pipConfig = new VirtualMachineScaleSetPublicIpAddressConfiguration(); pipConfig.withName("pip1"); pipConfig.withIdleTimeoutInMinutes(15); nicIpConfig.withPublicIpAddressConfiguration(pipConfig); return this; } } @Override public VirtualMachineScaleSetImpl withVirtualMachinePublicIp(String leafDomainLabel) { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); if 
(nicIpConfig.publicIpAddressConfiguration() != null) { if (nicIpConfig.publicIpAddressConfiguration().dnsSettings() != null) { nicIpConfig.publicIpAddressConfiguration().dnsSettings().withDomainNameLabel(leafDomainLabel); } else { nicIpConfig .publicIpAddressConfiguration() .withDnsSettings(new VirtualMachineScaleSetPublicIpAddressConfigurationDnsSettings()); nicIpConfig.publicIpAddressConfiguration().dnsSettings().withDomainNameLabel(leafDomainLabel); } } else { VirtualMachineScaleSetPublicIpAddressConfiguration pipConfig = new VirtualMachineScaleSetPublicIpAddressConfiguration(); pipConfig.withName("pip1"); pipConfig.withIdleTimeoutInMinutes(15); pipConfig.withDnsSettings(new VirtualMachineScaleSetPublicIpAddressConfigurationDnsSettings()); pipConfig.dnsSettings().withDomainNameLabel(leafDomainLabel); nicIpConfig.withPublicIpAddressConfiguration(pipConfig); } return this; } @Override public VirtualMachineScaleSetImpl withVirtualMachinePublicIp( VirtualMachineScaleSetPublicIpAddressConfiguration pipConfig) { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); nicIpConfig.withPublicIpAddressConfiguration(pipConfig); return this; } @Override public VirtualMachineScaleSetImpl withAcceleratedNetworking() { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withEnableAcceleratedNetworking(true); return this; } @Override public VirtualMachineScaleSetImpl withoutAcceleratedNetworking() { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withEnableAcceleratedNetworking(false); return this; } @Override public VirtualMachineScaleSetImpl withIpForwarding() { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withEnableIpForwarding(true); return this; } @Override public VirtualMachineScaleSetImpl withoutIpForwarding() { VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); if (nicConfig == null) { return this; } nicConfig.withEnableIpForwarding(false); return this; } @Override public VirtualMachineScaleSetImpl withExistingNetworkSecurityGroup(NetworkSecurityGroup networkSecurityGroup) { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withNetworkSecurityGroup(new SubResource().withId(networkSecurityGroup.id())); return this; } @Override public VirtualMachineScaleSetImpl withExistingNetworkSecurityGroupId(String networkSecurityGroupId) { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withNetworkSecurityGroup(new SubResource().withId(networkSecurityGroupId)); return this; } @Override public VirtualMachineScaleSetImpl withoutNetworkSecurityGroup() { VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); if (nicConfig == null) { return this; } nicConfig.withNetworkSecurityGroup(null); return this; } @Override public VirtualMachineScaleSetImpl withSinglePlacementGroup() { this.innerModel().withSinglePlacementGroup(true); return this; } @Override public VirtualMachineScaleSetImpl withoutSinglePlacementGroup() { this.innerModel().withSinglePlacementGroup(false); return this; } @Override public VirtualMachineScaleSetImpl withExistingApplicationGatewayBackendPool(String backendPoolId) { 
initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = primaryNicDefaultIpConfiguration(); if (nicIpConfig.applicationGatewayBackendAddressPools() == null) { nicIpConfig.withApplicationGatewayBackendAddressPools(new ArrayList<>()); } boolean found = false; for (SubResource backendPool : nicIpConfig.applicationGatewayBackendAddressPools()) { if (backendPool.id().equalsIgnoreCase(backendPoolId)) { found = true; break; } } if (!found) { nicIpConfig.applicationGatewayBackendAddressPools().add(new SubResource().withId(backendPoolId)); } return this; } @Override public VirtualMachineScaleSetImpl withoutApplicationGatewayBackendPool(String backendPoolId) { VirtualMachineScaleSetIpConfiguration nicIpConfig = primaryNicDefaultIpConfiguration(); if (nicIpConfig == null || nicIpConfig.applicationGatewayBackendAddressPools() == null) { return this; } else { int foundIndex = -1; int index = -1; for (SubResource backendPool : nicIpConfig.applicationGatewayBackendAddressPools()) { index = index + 1; if (backendPool.id().equalsIgnoreCase(backendPoolId)) { foundIndex = index; break; } } if (foundIndex != -1) { nicIpConfig.applicationGatewayBackendAddressPools().remove(foundIndex); } return this; } } @Override public VirtualMachineScaleSetImpl withExistingApplicationSecurityGroup( ApplicationSecurityGroup applicationSecurityGroup) { return withExistingApplicationSecurityGroupId(applicationSecurityGroup.id()); } @Override public VirtualMachineScaleSetImpl withExistingApplicationSecurityGroupId(String applicationSecurityGroupId) { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = primaryNicDefaultIpConfiguration(); if (nicIpConfig.applicationSecurityGroups() == null) { nicIpConfig.withApplicationSecurityGroups(new ArrayList<>()); } boolean found = false; for (SubResource asg : nicIpConfig.applicationSecurityGroups()) { if (asg.id().equalsIgnoreCase(applicationSecurityGroupId)) { found = true; break; } } if (!found) { nicIpConfig.applicationSecurityGroups().add(new SubResource().withId(applicationSecurityGroupId)); } return this; } @Override public VirtualMachineScaleSetImpl withoutApplicationSecurityGroup(String applicationSecurityGroupId) { VirtualMachineScaleSetIpConfiguration nicIpConfig = primaryNicDefaultIpConfiguration(); if (nicIpConfig == null || nicIpConfig.applicationSecurityGroups() == null) { return this; } else { int foundIndex = -1; int index = -1; for (SubResource asg : nicIpConfig.applicationSecurityGroups()) { index = index + 1; if (asg.id().equalsIgnoreCase(applicationSecurityGroupId)) { foundIndex = index; break; } } if (foundIndex != -1) { nicIpConfig.applicationSecurityGroups().remove(foundIndex); } return this; } } @Override public VirtualMachineScaleSetImpl withProximityPlacementGroup(String proximityPlacementGroupId) { this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId)); this.newProximityPlacementGroupName = null; return this; } @Override public VirtualMachineScaleSetImpl withNewProximityPlacementGroup( String proximityPlacementGroupName, ProximityPlacementGroupType type) { this.newProximityPlacementGroupName = proximityPlacementGroupName; this.newProximityPlacementGroupType = type; this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineScaleSetImpl withDoNotRunExtensionsOnOverprovisionedVMs( Boolean doNotRunExtensionsOnOverprovisionedVMs) { this.innerModel().withDoNotRunExtensionsOnOverprovisionedVMs(doNotRunExtensionsOnOverprovisionedVMs); 
return this; } @Override public VirtualMachineScaleSetImpl withAdditionalCapabilities(AdditionalCapabilities additionalCapabilities) { this.innerModel().withAdditionalCapabilities(additionalCapabilities); return this; } private void createNewProximityPlacementGroup() { if (isInCreateMode()) { if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) { ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner(); plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType); plgInner.withLocation(this.innerModel().location()); plgInner = this .manager() .serviceClient() .getProximityPlacementGroups() .createOrUpdate(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner); this.innerModel().withProximityPlacementGroup((new SubResource().withId(plgInner.id()))); } } } @Override public VirtualMachineScaleSetImpl withPlan(PurchasePlan plan) { this.innerModel().withPlan(new Plan()); this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name()); return this; } /** * Class to manage Data Disk collection. */ private class ManagedDataDiskCollection { private final List<VirtualMachineScaleSetDataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<VirtualMachineScaleSetDataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineScaleSetImpl vmss; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; ManagedDataDiskCollection(VirtualMachineScaleSetImpl vmss) { this.vmss = vmss; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDataDisksDefaults() { if (this.vmss.innerModel() == null || this.vmss.innerModel().virtualMachineProfile() == null) { return; } VirtualMachineScaleSetStorageProfile storageProfile = this.vmss.innerModel().virtualMachineProfile().storageProfile(); if (isPending()) { if (storageProfile.dataDisks() == null) { storageProfile.withDataDisks(new ArrayList<>()); } List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile.dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (VirtualMachineScaleSetDataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (VirtualMachineScaleSetDataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (VirtualMachineScaleSetDataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (storageProfile.dataDisks() != null && storageProfile.dataDisks().size() == 0) { if (vmss.isInCreateMode()) { storageProfile.withDataDisks(null); } } this.clear(); } private void clear() { implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); } private boolean isPending() { return implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void 
setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { if (this.vmss.innerModel() == null || this.vmss.innerModel().virtualMachineProfile() == null) { return; } VirtualMachineScaleSetStorageProfile storageProfile = this.vmss.innerModel().virtualMachineProfile().storageProfile(); List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile.dataDisks(); for (VirtualMachineScaleSetDataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new VirtualMachineScaleSetManagedDiskParameters()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { if (this.vmss.innerModel() == null || this.vmss.innerModel().virtualMachineProfile() == null) { return; } VirtualMachineScaleSetStorageProfile storageProfile = this.vmss.innerModel().virtualMachineProfile().storageProfile(); List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile.dataDisks(); for (VirtualMachineScaleSetDataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { if (this.vmss.innerModel() == null || this.vmss.innerModel().virtualMachineProfile() == null) { return; } VirtualMachineScaleSetStorageProfile storageProfile = this.vmss.innerModel().virtualMachineProfile().storageProfile(); List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile.dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (VirtualMachineScaleSetDataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } } /** Class to manage VMSS boot diagnostics settings. 
*/ private class BootDiagnosticsHandler { private final VirtualMachineScaleSetImpl vmssImpl; private String creatableDiagnosticsStorageAccountKey; private String creatableStorageAccountKey; private StorageAccount existingStorageAccountToAssociate; private boolean useManagedStorageAccount = false; BootDiagnosticsHandler(VirtualMachineScaleSetImpl vmssImpl) { this.vmssImpl = vmssImpl; if (isBootDiagnosticsEnabled() && this.vmssInner() != null && this.vmssInner().virtualMachineProfile() != null && this.vmssInner().virtualMachineProfile() .diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } } public boolean isBootDiagnosticsEnabled() { if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return false; } DiagnosticsProfile diagnosticsProfile = this.vmssInner().virtualMachineProfile().diagnosticsProfile(); if (diagnosticsProfile != null && diagnosticsProfile.bootDiagnostics() != null && diagnosticsProfile.bootDiagnostics().enabled() != null) { return diagnosticsProfile.bootDiagnostics().enabled(); } return false; } public String bootDiagnosticsStorageUri() { if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return null; } DiagnosticsProfile diagnosticsProfile = this.vmssInner().virtualMachineProfile().diagnosticsProfile(); if (diagnosticsProfile != null && diagnosticsProfile.bootDiagnostics() != null) { return diagnosticsProfile.bootDiagnostics().storageUri(); } return null; } BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; } BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmssImpl.addDependency(creatable); return this; } BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return this; } this.enableDisable(true); this.useManagedStorageAccount = false; this .vmssInner() .virtualMachineProfile() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccountBlobEndpointUri); return this; } BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); } BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; } void prepare() { if (useManagedStorageAccount) { return; } this.creatableStorageAccountKey = null; this.existingStorageAccountToAssociate = null; if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return; } DiagnosticsProfile diagnosticsProfile = this.vmssInner().virtualMachineProfile().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } if (this.creatableDiagnosticsStorageAccountKey != null) { return; } if (!this.vmssImpl.creatableStorageAccountKeys.isEmpty()) { this.creatableStorageAccountKey = this.vmssImpl.creatableStorageAccountKeys.get(0); return; } if (!this.vmssImpl.existingStorageAccountsToAssociate.isEmpty()) { 
this.existingStorageAccountToAssociate = this.vmssImpl.existingStorageAccountsToAssociate.get(0); return; } String accountName = this.vmssImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmssImpl.creatableGroup != null) { storageAccountCreatable = this .vmssImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmssImpl.regionName()) .withNewResourceGroup(this.vmssImpl.creatableGroup); } else { storageAccountCreatable = this .vmssImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmssImpl.regionName()) .withExistingResourceGroup(this.vmssImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmssImpl.addDependency(storageAccountCreatable); } void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return; } DiagnosticsProfile diagnosticsProfile = this.vmssInner().virtualMachineProfile().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmssImpl.taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.creatableStorageAccountKey != null) { storageAccount = this.vmssImpl.taskResult(this.creatableStorageAccountKey); } else if (this.existingStorageAccountToAssociate != null) { storageAccount = this.existingStorageAccountToAssociate; } if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmssInner() .virtualMachineProfile() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); } private VirtualMachineScaleSetInner vmssInner() { return this.vmssImpl.innerModel(); } private void enableDisable(boolean enable) { if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return; } if (this.vmssInner().virtualMachineProfile().diagnosticsProfile() == null) { this.vmssInner().virtualMachineProfile().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmssInner().virtualMachineProfile().diagnosticsProfile().bootDiagnostics() == null) { this .vmssInner() .virtualMachineProfile() .diagnosticsProfile() .withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmssInner().virtualMachineProfile().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmssInner().virtualMachineProfile().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmssInner().virtualMachineProfile().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
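/*
 * Illustrative usage sketch, not part of the SDK source reproduced above or below: it shows how
 * the boot diagnostics state managed by BootDiagnosticsHandler is typically driven through the
 * fluent VirtualMachineScaleSet.Update API. The sketch class name and the `vmss` and
 * `storageAccount` parameters are assumptions standing in for an already-retrieved scale set and
 * storage account; they do not appear in the original source.
 */
import com.azure.resourcemanager.compute.models.VirtualMachineScaleSet;
import com.azure.resourcemanager.storage.models.StorageAccount;

class BootDiagnosticsUsageSketch {
    static VirtualMachineScaleSet enableBootDiagnostics(VirtualMachineScaleSet vmss, StorageAccount storageAccount) {
        // withBootDiagnostics(StorageAccount) writes the account's primary blob endpoint into the
        // boot diagnostics storage URI of the virtual machine profile.
        return vmss.update()
            .withBootDiagnostics(storageAccount)
            .apply();
    }

    static VirtualMachineScaleSet disableBootDiagnostics(VirtualMachineScaleSet vmss) {
        // withoutBootDiagnostics() sets bootDiagnostics().enabled() to false and clears the storage URI.
        return vmss.update()
            .withoutBootDiagnostics()
            .apply();
    }
}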
class VirtualMachineScaleSetImpl extends GroupableParentResourceImpl< VirtualMachineScaleSet, VirtualMachineScaleSetInner, VirtualMachineScaleSetImpl, ComputeManager> implements VirtualMachineScaleSet, VirtualMachineScaleSet.DefinitionManagedOrUnmanaged, VirtualMachineScaleSet.DefinitionManaged, VirtualMachineScaleSet.DefinitionUnmanaged, VirtualMachineScaleSet.Update, VirtualMachineScaleSet.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate, VirtualMachineScaleSet.DefinitionStages.WithUserAssignedManagedServiceIdentity, VirtualMachineScaleSet.UpdateStages.WithSystemAssignedIdentityBasedAccessOrApply, VirtualMachineScaleSet.UpdateStages.WithUserAssignedManagedServiceIdentity { private final StorageManager storageManager; private final NetworkManager networkManager; private final IdentifierProvider namer; private boolean isMarketplaceLinuxImage = false; private String existingPrimaryNetworkSubnetNameToAssociate; private List<String> creatableStorageAccountKeys = new ArrayList<>(); private List<StorageAccount> existingStorageAccountsToAssociate = new ArrayList<>(); private Map<String, VirtualMachineScaleSetExtension> extensions; private LoadBalancer primaryInternetFacingLoadBalancer; private LoadBalancer primaryInternalLoadBalancer; private boolean removePrimaryInternetFacingLoadBalancerOnUpdate; private boolean removePrimaryInternalLoadBalancerOnUpdate; private LoadBalancer primaryInternetFacingLoadBalancerToAttachOnUpdate; private LoadBalancer primaryInternalLoadBalancerToAttachOnUpdate; private List<String> primaryInternetFacingLBBackendsToRemoveOnUpdate = new ArrayList<>(); private List<String> primaryInternetFacingLBInboundNatPoolsToRemoveOnUpdate = new ArrayList<>(); private List<String> primaryInternalLBBackendsToRemoveOnUpdate = new ArrayList<>(); private List<String> primaryInternalLBInboundNatPoolsToRemoveOnUpdate = new ArrayList<>(); private List<String> primaryInternetFacingLBBackendsToAddOnUpdate = new ArrayList<>(); private List<String> primaryInternetFacingLBInboundNatPoolsToAddOnUpdate = new ArrayList<>(); private List<String> primaryInternalLBBackendsToAddOnUpdate = new ArrayList<>(); private List<String> primaryInternalLBInboundNatPoolsToAddOnUpdate = new ArrayList<>(); private boolean isUnmanagedDiskSelected; private final ManagedDataDiskCollection managedDataDisks; VirtualMachineScaleSetMsiHandler virtualMachineScaleSetMsiHandler; private final BootDiagnosticsHandler bootDiagnosticsHandler; private String newProximityPlacementGroupName; private ProximityPlacementGroupType newProximityPlacementGroupType; private boolean removeOsProfile; private final ClientLogger logger = new ClientLogger(VirtualMachineScaleSetImpl.class); private boolean profileAttached = false; VirtualMachineScaleSetImpl( String name, VirtualMachineScaleSetInner innerModel, final ComputeManager computeManager, final StorageManager storageManager, final NetworkManager networkManager, final AuthorizationManager authorizationManager) { super(name, innerModel, computeManager); this.storageManager = storageManager; this.networkManager = networkManager; this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.name()); this.managedDataDisks = new ManagedDataDiskCollection(this); this.virtualMachineScaleSetMsiHandler = new VirtualMachineScaleSetMsiHandler(authorizationManager, this); this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this); this.newProximityPlacementGroupName = null; this.newProximityPlacementGroupType = null; } @Override protected void 
initializeChildrenFromInner() { this.extensions = new HashMap<>(); if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null && this.innerModel().virtualMachineProfile().extensionProfile() != null) { if (this.innerModel().virtualMachineProfile().extensionProfile().extensions() != null) { for (VirtualMachineScaleSetExtensionInner inner : this.innerModel().virtualMachineProfile().extensionProfile().extensions()) { this.extensions.put(inner.name(), new VirtualMachineScaleSetExtensionImpl(inner, this)); } } } } @Override public VirtualMachineScaleSetVMs virtualMachines() { return new VirtualMachineScaleSetVMsImpl( this, this.manager().serviceClient().getVirtualMachineScaleSetVMs(), this.myManager); } @Override public PagedIterable<VirtualMachineScaleSetSku> listAvailableSkus() { return PagedConverter.mapPage(this .manager() .serviceClient() .getVirtualMachineScaleSets() .listSkus(this.resourceGroupName(), this.name()), VirtualMachineScaleSetSkuImpl::new); } @Override public void deallocate() { this.deallocateAsync().block(); } @Override public Mono<Void> deallocateAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .deallocateAsync(this.resourceGroupName(), this.name(), null) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void powerOff() { this.powerOffAsync().block(); } @Override public Mono<Void> powerOffAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .powerOffAsync(this.resourceGroupName(), this.name(), null, null); } @Override public void restart() { this.restartAsync().block(); } @Override public Mono<Void> restartAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .restartAsync(this.resourceGroupName(), this.name(), null); } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .startAsync(this.resourceGroupName(), this.name(), null); } @Override public void reimage() { this.reimageAsync().block(); } @Override public Mono<Void> reimageAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .reimageAsync(this.resourceGroupName(), this.name(), null); } @Override public RunCommandResult runPowerShellScriptInVMInstance( String vmId, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachineScaleSets() .runPowerShellScriptInVMInstance( this.resourceGroupName(), this.name(), vmId, scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runPowerShellScriptInVMInstanceAsync( String vmId, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachineScaleSets() .runPowerShellScriptInVMInstanceAsync( this.resourceGroupName(), this.name(), vmId, scriptLines, scriptParameters); } @Override public RunCommandResult runShellScriptInVMInstance( String vmId, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachineScaleSets() .runShellScriptInVMInstance(this.resourceGroupName(), this.name(), vmId, scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runShellScriptInVMInstanceAsync( String vmId, List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachineScaleSets() .runShellScriptInVMInstanceAsync( this.resourceGroupName(), this.name(), vmId, scriptLines, scriptParameters); } 
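/*
 * Hedged usage sketch (illustrative comment only, not SDK source): the run-command pass-through
 * methods above can be exercised roughly as follows, assuming an already-retrieved scale set
 * `vmss`; the instance id "0" and the script line are placeholders, and Collections refers to
 * java.util.Collections.
 *
 *   List<String> scriptLines = Collections.singletonList("uname -a");
 *   List<RunCommandInputParameter> parameters = Collections.emptyList();
 *   RunCommandResult result = vmss.runShellScriptInVMInstance("0", scriptLines, parameters);
 *   Mono<RunCommandResult> asyncResult = vmss.runShellScriptInVMInstanceAsync("0", scriptLines, parameters);
 */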
@Override public RunCommandResult runCommandInVMInstance(String vmId, RunCommandInput inputCommand) { return this .manager() .virtualMachineScaleSets() .runCommandInVMInstance(this.resourceGroupName(), this.name(), vmId, inputCommand); } @Override public Mono<RunCommandResult> runCommandVMInstanceAsync(String vmId, RunCommandInput inputCommand) { return this .manager() .virtualMachineScaleSets() .runCommandVMInstanceAsync(this.resourceGroupName(), this.name(), vmId, inputCommand); } @Override public String computerNamePrefix() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().osProfile().computerNamePrefix(); } else { return null; } } @Override public OperatingSystemTypes osType() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().storageProfile().osDisk().osType(); } else { return null; } } @Override public CachingTypes osDiskCachingType() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().storageProfile().osDisk().caching(); } else { return null; } } @Override public String osDiskName() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().storageProfile().osDisk().name(); } else { return null; } } @Override public UpgradeMode upgradeModel() { return this.innerModel().upgradePolicy().mode(); } @Override public boolean overProvisionEnabled() { return this.innerModel().overprovision(); } @Override public VirtualMachineScaleSetSkuTypes sku() { return VirtualMachineScaleSetSkuTypes.fromSku(this.innerModel().sku()); } @Override public int capacity() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().sku().capacity()); } @Override public Network getPrimaryNetwork() throws IOException { VirtualMachineScaleSetIpConfiguration ipConfiguration = primaryNicDefaultIpConfiguration(); if (ipConfiguration == null) { return null; } String subnetId = ipConfiguration.subnet().id(); String virtualNetworkId = ResourceUtils.parentResourceIdFromResourceId(subnetId); return this.networkManager.networks().getById(virtualNetworkId); } @Override public LoadBalancer getPrimaryInternetFacingLoadBalancer() throws IOException { if (this.primaryInternetFacingLoadBalancer == null) { loadCurrentPrimaryLoadBalancersIfAvailableAsync().block(); } return this.primaryInternetFacingLoadBalancer; } @Override public Map<String, LoadBalancerBackend> listPrimaryInternetFacingLoadBalancerBackends() throws IOException { if (this.getPrimaryInternetFacingLoadBalancer() != null) { return getBackendsAssociatedWithIpConfiguration( this.primaryInternetFacingLoadBalancer, primaryNicDefaultIpConfiguration()); } return new HashMap<>(); } @Override public Map<String, LoadBalancerInboundNatPool> listPrimaryInternetFacingLoadBalancerInboundNatPools() throws IOException { if (this.getPrimaryInternetFacingLoadBalancer() != null) { return getInboundNatPoolsAssociatedWithIpConfiguration( this.primaryInternetFacingLoadBalancer, primaryNicDefaultIpConfiguration()); } return new HashMap<>(); } @Override public LoadBalancer getPrimaryInternalLoadBalancer() throws IOException { if (this.primaryInternalLoadBalancer == null) { loadCurrentPrimaryLoadBalancersIfAvailableAsync().block(); } return this.primaryInternalLoadBalancer; } @Override public Map<String, LoadBalancerBackend> 
listPrimaryInternalLoadBalancerBackends() throws IOException { if (this.getPrimaryInternalLoadBalancer() != null) { return getBackendsAssociatedWithIpConfiguration( this.primaryInternalLoadBalancer, primaryNicDefaultIpConfiguration()); } return new HashMap<>(); } @Override public Map<String, LoadBalancerInboundNatPool> listPrimaryInternalLoadBalancerInboundNatPools() throws IOException { if (this.getPrimaryInternalLoadBalancer() != null) { return getInboundNatPoolsAssociatedWithIpConfiguration( this.primaryInternalLoadBalancer, primaryNicDefaultIpConfiguration()); } return new HashMap<>(); } @Override public List<String> primaryPublicIpAddressIds() throws IOException { LoadBalancer loadBalancer = this.getPrimaryInternetFacingLoadBalancer(); if (loadBalancer != null) { return loadBalancer.publicIpAddressIds(); } return new ArrayList<>(); } @Override public List<String> vhdContainers() { if (this.storageProfile() != null && this.storageProfile().osDisk() != null && this.storageProfile().osDisk().vhdContainers() != null) { return this.storageProfile().osDisk().vhdContainers(); } return new ArrayList<>(); } @Override public VirtualMachineScaleSetStorageProfile storageProfile() { if (this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().storageProfile(); } else { return null; } } @Override public VirtualMachineScaleSetNetworkProfile networkProfile() { if (this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().networkProfile(); } else { return null; } } @Override public Map<String, VirtualMachineScaleSetExtension> extensions() { return Collections.unmodifiableMap(this.extensions); } @Override public VirtualMachinePriorityTypes virtualMachinePriority() { if (this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().priority(); } else { return null; } } @Override public BillingProfile billingProfile() { if (this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().billingProfile(); } else { return null; } } @Override public VirtualMachineScaleSetPublicIpAddressConfiguration virtualMachinePublicIpConfig() { VirtualMachineScaleSetIpConfiguration nicConfig = this.primaryNicDefaultIpConfiguration(); if (nicConfig == null) { return null; } return nicConfig.publicIpAddressConfiguration(); } @Override public VirtualMachineEvictionPolicyTypes virtualMachineEvictionPolicy() { if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { return this.innerModel().virtualMachineProfile().evictionPolicy(); } else { return null; } } @Override public boolean isIpForwardingEnabled() { VirtualMachineScaleSetNetworkConfiguration nicConfig = primaryNicConfiguration(); if (nicConfig == null || nicConfig.enableIpForwarding() == null) { return false; } return nicConfig.enableIpForwarding(); } @Override public boolean isAcceleratedNetworkingEnabled() { VirtualMachineScaleSetNetworkConfiguration nicConfig = primaryNicConfiguration(); if (nicConfig == null || nicConfig.enableAcceleratedNetworking() == null) { return false; } return nicConfig.enableAcceleratedNetworking(); } @Override public String networkSecurityGroupId() { VirtualMachineScaleSetNetworkConfiguration nicConfig = primaryNicConfiguration(); if (nicConfig != null && nicConfig.networkSecurityGroup() != null) { return nicConfig.networkSecurityGroup().id(); } else { return null; } } @Override public boolean isSinglePlacementGroupEnabled() { if 
(this.innerModel().singlePlacementGroup() != null) { return this.innerModel().singlePlacementGroup(); } else { return false; } } @Override public List<String> applicationSecurityGroupIds() { VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); if (nicIpConfig == null) { return Collections.emptyList(); } List<String> asgIds = new ArrayList<>(); if (nicIpConfig.applicationSecurityGroups() != null) { for (SubResource asg : nicIpConfig.applicationSecurityGroups()) { asgIds.add(asg.id()); } } return asgIds; } @Override public Boolean doNotRunExtensionsOnOverprovisionedVMs() { return this.innerModel().doNotRunExtensionsOnOverprovisionedVMs(); } @Override public ProximityPlacementGroup proximityPlacementGroup() { if (innerModel().proximityPlacementGroup() == null) { return null; } else { ResourceId id = ResourceId.fromString(innerModel().proximityPlacementGroup().id()); ProximityPlacementGroupInner plgInner = manager() .serviceClient() .getProximityPlacementGroups() .getByResourceGroup(id.resourceGroupName(), id.name()); if (plgInner == null) { return null; } else { return new ProximityPlacementGroupImpl(plgInner); } } } @Override public AdditionalCapabilities additionalCapabilities() { return this.innerModel().additionalCapabilities(); } @Override public Plan plan() { return this.innerModel().plan(); } @Override public OrchestrationMode orchestrationMode() { return this.innerModel().orchestrationMode() == null ? OrchestrationMode.UNIFORM : this.innerModel().orchestrationMode(); } @Override public VirtualMachineScaleSetNetworkInterface getNetworkInterfaceByInstanceId(String instanceId, String name) { return this .networkManager .networkInterfaces() .getByVirtualMachineScaleSetInstanceId(this.resourceGroupName(), this.name(), instanceId, name); } @Override public Mono<VirtualMachineScaleSetNetworkInterface> getNetworkInterfaceByInstanceIdAsync(String instanceId, String name) { return this .networkManager .networkInterfaces() .getByVirtualMachineScaleSetInstanceIdAsync(this.resourceGroupName(), this.name(), instanceId, name); } @Override public PagedIterable<VirtualMachineScaleSetNetworkInterface> listNetworkInterfaces() { return this .networkManager .networkInterfaces() .listByVirtualMachineScaleSet(this.resourceGroupName(), this.name()); } @Override public PagedIterable<VirtualMachineScaleSetNetworkInterface> listNetworkInterfacesByInstanceId( String virtualMachineInstanceId) { return this .networkManager .networkInterfaces() .listByVirtualMachineScaleSetInstanceId(this.resourceGroupName(), this.name(), virtualMachineInstanceId); } @Override public PagedFlux<VirtualMachineScaleSetNetworkInterface> listNetworkInterfacesByInstanceIdAsync( String virtualMachineInstanceId) { return this .networkManager .networkInterfaces() .listByVirtualMachineScaleSetInstanceIdAsync( this.resourceGroupName(), this.name(), virtualMachineInstanceId); } @Override public VirtualMachineScaleSetImpl withSku(VirtualMachineScaleSetSkuTypes skuType) { this.innerModel().withSku(skuType.sku()); initVMProfileIfNecessary(); return this; } @Override public VirtualMachineScaleSetImpl withFlexibleOrchestrationMode() { return withFlexibleOrchestrationMode(1); } @Override public VirtualMachineScaleSetImpl withFlexibleOrchestrationMode(int faultDomainCount) { this.innerModel().withOrchestrationMode(OrchestrationMode.FLEXIBLE); this.innerModel().withPlatformFaultDomainCount(faultDomainCount); return this; } @Override public VirtualMachineScaleSetImpl withSku(VirtualMachineScaleSetSku
sku) { return this.withSku(sku.skuType()); } @Override public VirtualMachineScaleSetImpl withExistingPrimaryNetworkSubnet(Network network, String subnetName) { initVMProfileIfNecessary(); this.existingPrimaryNetworkSubnetNameToAssociate = mergePath(network.id(), "subnets", subnetName); return this; } @Override public VirtualMachineScaleSetImpl withExistingPrimaryInternetFacingLoadBalancer(LoadBalancer loadBalancer) { if (loadBalancer.publicIpAddressIds().isEmpty()) { throw logger .logExceptionAsError( new IllegalArgumentException("Parameter loadBalancer must be an Internet facing load balancer")); } initVMProfileIfNecessary(); if (isInCreateMode()) { this.primaryInternetFacingLoadBalancer = loadBalancer; associateLoadBalancerToIpConfiguration( this.primaryInternetFacingLoadBalancer, this.primaryNicDefaultIpConfiguration()); } else { this.primaryInternetFacingLoadBalancerToAttachOnUpdate = loadBalancer; } return this; } @Override public VirtualMachineScaleSetImpl withPrimaryInternetFacingLoadBalancerBackends(String... backendNames) { initVMProfileIfNecessary(); if (this.isInCreateMode()) { VirtualMachineScaleSetIpConfiguration defaultPrimaryIpConfig = this.primaryNicDefaultIpConfiguration(); removeAllBackendAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancer, defaultPrimaryIpConfig); associateBackEndsToIpConfiguration( this.primaryInternetFacingLoadBalancer.id(), defaultPrimaryIpConfig, backendNames); } else { addToList(this.primaryInternetFacingLBBackendsToAddOnUpdate, backendNames); } return this; } @Override public VirtualMachineScaleSetImpl withPrimaryInternetFacingLoadBalancerInboundNatPools(String... natPoolNames) { initVMProfileIfNecessary(); if (this.isInCreateMode()) { VirtualMachineScaleSetIpConfiguration defaultPrimaryIpConfig = this.primaryNicDefaultIpConfiguration(); removeAllInboundNatPoolAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancer, defaultPrimaryIpConfig); associateInboundNATPoolsToIpConfiguration( this.primaryInternetFacingLoadBalancer.id(), defaultPrimaryIpConfig, natPoolNames); } else { addToList(this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate, natPoolNames); } return this; } @Override public VirtualMachineScaleSetImpl withExistingPrimaryInternalLoadBalancer(LoadBalancer loadBalancer) { if (!loadBalancer.publicIpAddressIds().isEmpty()) { throw logger .logExceptionAsError( new IllegalArgumentException("Parameter loadBalancer must be an internal load balancer")); } String lbNetworkId = null; for (LoadBalancerPrivateFrontend frontEnd : loadBalancer.privateFrontends().values()) { if (frontEnd.networkId() != null) { lbNetworkId = frontEnd.networkId(); } } initVMProfileIfNecessary(); if (isInCreateMode()) { String vmNICNetworkId = ResourceUtils.parentResourceIdFromResourceId(this.existingPrimaryNetworkSubnetNameToAssociate); if (!vmNICNetworkId.equalsIgnoreCase(lbNetworkId)) { throw logger .logExceptionAsError( new IllegalArgumentException( "Virtual network associated with scale set virtual machines" + " and internal load balancer must be same. 
" + "'" + vmNICNetworkId + "'" + "'" + lbNetworkId)); } this.primaryInternalLoadBalancer = loadBalancer; associateLoadBalancerToIpConfiguration( this.primaryInternalLoadBalancer, this.primaryNicDefaultIpConfiguration()); } else { String vmNicVnetId = ResourceUtils.parentResourceIdFromResourceId(primaryNicDefaultIpConfiguration().subnet().id()); if (!vmNicVnetId.equalsIgnoreCase(lbNetworkId)) { throw logger .logExceptionAsError( new IllegalArgumentException( "Virtual network associated with scale set virtual machines" + " and internal load balancer must be same. " + "'" + vmNicVnetId + "'" + "'" + lbNetworkId)); } this.primaryInternalLoadBalancerToAttachOnUpdate = loadBalancer; } return this; } @Override public VirtualMachineScaleSetImpl withPrimaryInternalLoadBalancerBackends(String... backendNames) { initVMProfileIfNecessary(); if (this.isInCreateMode()) { VirtualMachineScaleSetIpConfiguration defaultPrimaryIpConfig = primaryNicDefaultIpConfiguration(); removeAllBackendAssociationFromIpConfiguration(this.primaryInternalLoadBalancer, defaultPrimaryIpConfig); associateBackEndsToIpConfiguration( this.primaryInternalLoadBalancer.id(), defaultPrimaryIpConfig, backendNames); } else { addToList(this.primaryInternalLBBackendsToAddOnUpdate, backendNames); } return this; } @Override public VirtualMachineScaleSetImpl withPrimaryInternalLoadBalancerInboundNatPools(String... natPoolNames) { initVMProfileIfNecessary(); if (this.isInCreateMode()) { VirtualMachineScaleSetIpConfiguration defaultPrimaryIpConfig = this.primaryNicDefaultIpConfiguration(); removeAllInboundNatPoolAssociationFromIpConfiguration( this.primaryInternalLoadBalancer, defaultPrimaryIpConfig); associateInboundNATPoolsToIpConfiguration( this.primaryInternalLoadBalancer.id(), defaultPrimaryIpConfig, natPoolNames); } else { addToList(this.primaryInternalLBInboundNatPoolsToAddOnUpdate, natPoolNames); } return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternalLoadBalancer() { if (this.isInUpdateMode()) { this.removePrimaryInternalLoadBalancerOnUpdate = true; } return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternetFacingLoadBalancer() { if (this.isInUpdateMode()) { this.removePrimaryInternetFacingLoadBalancerOnUpdate = true; } return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternetFacingLoadBalancerBackends(String... backendNames) { addToList(this.primaryInternetFacingLBBackendsToRemoveOnUpdate, backendNames); return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternalLoadBalancerBackends(String... backendNames) { addToList(this.primaryInternalLBBackendsToRemoveOnUpdate, backendNames); return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternetFacingLoadBalancerNatPools(String... natPoolNames) { addToList(this.primaryInternetFacingLBInboundNatPoolsToRemoveOnUpdate, natPoolNames); return this; } @Override public VirtualMachineScaleSetImpl withoutPrimaryInternalLoadBalancerNatPools(String... 
natPoolNames) { addToList(this.primaryInternalLBInboundNatPoolsToRemoveOnUpdate, natPoolNames); return this; } @Override public VirtualMachineScaleSetImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) { return withSpecificWindowsImageVersion(knownImage.imageReference()); } @Override public VirtualMachineScaleSetImpl withLatestWindowsImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference().withPublisher(publisher).withOffer(offer).withSku(sku).withVersion("latest"); return withSpecificWindowsImageVersion(imageReference); } @Override public VirtualMachineScaleSetImpl withSpecificWindowsImageVersion(ImageReference imageReference) { initVMProfileIfNecessary(); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().withImageReference(imageReference); this.innerModel().virtualMachineProfile().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineScaleSetImpl withGeneralizedWindowsCustomImage(String customImageId) { initVMProfileIfNecessary(); ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().withImageReference(imageReferenceInner); this.innerModel().virtualMachineProfile().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineScaleSetImpl withSpecializedWindowsCustomImage(String customImageId) { this.withGeneralizedWindowsCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineScaleSetImpl withStoredWindowsImage(String imageUrl) { initVMProfileIfNecessary(); VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS); this.innerModel().virtualMachineProfile().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineScaleSetImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) { return withSpecificLinuxImageVersion(knownImage.imageReference()); } @Override public VirtualMachineScaleSetImpl withLatestLinuxImage(String publisher, String offer, String sku) { ImageReference imageReference = new 
ImageReference().withPublisher(publisher).withOffer(offer).withSku(sku).withVersion("latest"); return withSpecificLinuxImageVersion(imageReference); } @Override public VirtualMachineScaleSetImpl withSpecificLinuxImageVersion(ImageReference imageReference) { initVMProfileIfNecessary(); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().withImageReference(imageReference); this.innerModel().virtualMachineProfile().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineScaleSetImpl withGeneralizedLinuxCustomImage(String customImageId) { initVMProfileIfNecessary(); ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().withImageReference(imageReferenceInner); this.innerModel().virtualMachineProfile().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineScaleSetImpl withSpecializedLinuxCustomImage(String customImageId) { this.withGeneralizedLinuxCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineScaleSetImpl withStoredLinuxImage(String imageUrl) { initVMProfileIfNecessary(); VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX); this.innerModel().virtualMachineProfile().osProfile().withLinuxConfiguration(new LinuxConfiguration()); return this; } @Override public VirtualMachineScaleSetImpl withAdminUsername(String adminUserName) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withAdminUsername(adminUserName); return this; } @Override public VirtualMachineScaleSetImpl withRootUsername(String adminUserName) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withAdminUsername(adminUserName); return this; } @Override public VirtualMachineScaleSetImpl withAdminPassword(String password) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineScaleSetImpl withRootPassword(String password) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineScaleSetImpl withSsh(String publicKeyData) { initVMProfileIfNecessary(); VirtualMachineScaleSetOSProfile osProfile = this.innerModel().virtualMachineProfile().osProfile(); if (osProfile.linuxConfiguration().ssh() == null) { SshConfiguration sshConfiguration = new SshConfiguration(); sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>()); osProfile.linuxConfiguration().withSsh(sshConfiguration); } SshPublicKey sshPublicKey = new SshPublicKey(); sshPublicKey.withKeyData(publicKeyData); sshPublicKey.withPath("/home/" + osProfile.adminUsername() + 
"/.ssh/authorized_keys"); osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey); return this; } @Override public VirtualMachineScaleSetImpl withVMAgent() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(true); return this; } @Override public VirtualMachineScaleSetImpl withoutVMAgent() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withProvisionVMAgent(false); return this; } @Override public VirtualMachineScaleSetImpl withAutoUpdate() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineScaleSetImpl withoutAutoUpdate() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false); return this; } @Override public VirtualMachineScaleSetImpl withTimeZone(String timeZone) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withTimeZone(timeZone); return this; } @Override public VirtualMachineScaleSetImpl withWinRM(WinRMListener listener) { initVMProfileIfNecessary(); if (this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().winRM() == null) { WinRMConfiguration winRMConfiguration = new WinRMConfiguration(); this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().withWinRM(winRMConfiguration); } this.innerModel().virtualMachineProfile().osProfile().windowsConfiguration().winRM().listeners().add(listener); return this; } @Override public VirtualMachineScaleSetImpl withOSDiskCaching(CachingTypes cachingType) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withCaching(cachingType); return this; } @Override public VirtualMachineScaleSetImpl withOSDiskName(String name) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().storageProfile().osDisk().withName(name); return this; } @Override public VirtualMachineScaleSetImpl withComputerNamePrefix(String namePrefix) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withComputerNamePrefix(namePrefix); return this; } @Override public VirtualMachineScaleSetImpl withUpgradeMode(UpgradeMode upgradeMode) { if (this.innerModel().upgradePolicy() == null) { this.innerModel().withUpgradePolicy(new UpgradePolicy()); } this.innerModel().upgradePolicy().withMode(upgradeMode); return this; } @Override public VirtualMachineScaleSetImpl withOverProvision(boolean enabled) { this.innerModel().withOverprovision(enabled); return this; } @Override public VirtualMachineScaleSetImpl withOverProvisioning() { return this.withOverProvision(true); } @Override public VirtualMachineScaleSetImpl withoutOverProvisioning() { return this.withOverProvision(false); } @Override public VirtualMachineScaleSetImpl withCapacity(long capacity) { this.innerModel().sku().withCapacity(capacity); return this; } @Override public VirtualMachineScaleSetImpl withNewStorageAccount(String name) { StorageAccount.DefinitionStages.WithGroup definitionWithGroup = this.storageManager.storageAccounts().define(name).withRegion(this.regionName()); Creatable<StorageAccount> definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = 
definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return withNewStorageAccount(definitionAfterGroup); } @Override public VirtualMachineScaleSetImpl withNewStorageAccount(Creatable<StorageAccount> creatable) { this.creatableStorageAccountKeys.add(this.addDependency(creatable)); return this; } @Override public VirtualMachineScaleSetImpl withExistingStorageAccount(StorageAccount storageAccount) { this.existingStorageAccountsToAssociate.add(storageAccount); return this; } @Override public VirtualMachineScaleSetImpl withCustomData(String base64EncodedCustomData) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withCustomData(base64EncodedCustomData); return this; } @Override public VirtualMachineScaleSetImpl withSecrets(List<VaultSecretGroup> secrets) { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withSecrets(secrets); return this; } @Override public VirtualMachineScaleSetImpl withoutSecrets() { initVMProfileIfNecessary(); this.innerModel().virtualMachineProfile().osProfile().withSecrets(new ArrayList<VaultSecretGroup>()); return this; } @Override public VirtualMachineScaleSetExtensionImpl defineNewExtension(String name) { return new VirtualMachineScaleSetExtensionImpl(new VirtualMachineScaleSetExtensionInner().withName(name), this); } protected VirtualMachineScaleSetImpl withExtension(VirtualMachineScaleSetExtensionImpl extension) { this.extensions.put(extension.name(), extension); return this; } @Override public VirtualMachineScaleSetExtensionImpl updateExtension(String name) { return (VirtualMachineScaleSetExtensionImpl) this.extensions.get(name); } @Override public VirtualMachineScaleSetImpl withoutExtension(String name) { if (this.extensions.containsKey(name)) { this.extensions.remove(name); } return this; } @Override public boolean isManagedDiskEnabled() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null) { return false; } VirtualMachineScaleSetStorageProfile storageProfile = this.innerModel().virtualMachineProfile().storageProfile(); if (isOsDiskFromCustomImage(storageProfile)) { return true; } if (isOSDiskFromStoredImage(storageProfile)) { return false; } if (isOSDiskFromPlatformImage(storageProfile)) { if (this.isUnmanagedDiskSelected) { return false; } } if (isInCreateMode()) { return true; } else { List<String> vhdContainers = storageProfile.osDisk().vhdContainers(); return vhdContainers == null || vhdContainers.size() == 0; } } @Override public boolean isManagedServiceIdentityEnabled() { ResourceIdentityType type = this.managedServiceIdentityType(); return type != null && !type.equals(ResourceIdentityType.NONE); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().tenantId(); } return null; } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().principalId(); } return null; } @Override public ResourceIdentityType managedServiceIdentityType() { if (this.innerModel().identity() != null) { return this.innerModel().identity().type(); } return null; } @Override public Set<String> userAssignedManagedServiceIdentityIds() { if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) { return Collections .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet())); } return 
Collections.unmodifiableSet(new HashSet<String>()); } @Override public Set<AvailabilityZoneId> availabilityZones() { Set<AvailabilityZoneId> zones = new HashSet<>(); if (this.innerModel().zones() != null) { for (String zone : this.innerModel().zones()) { zones.add(AvailabilityZoneId.fromString(zone)); } } return Collections.unmodifiableSet(zones); } @Override public boolean isBootDiagnosticsEnabled() { return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled(); } @Override public String bootDiagnosticsStorageUri() { return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri(); } @Override public StorageAccountTypes managedOSDiskStorageAccountType() { if (this.innerModel().virtualMachineProfile() != null && this.innerModel().virtualMachineProfile().storageProfile() != null && this.innerModel().virtualMachineProfile().storageProfile().osDisk() != null && this.innerModel().virtualMachineProfile().storageProfile().osDisk().managedDisk() != null) { return this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .managedDisk() .storageAccountType(); } return null; } @Override public VirtualMachineScaleSetImpl withUnmanagedDisks() { this.isUnmanagedDiskSelected = true; return this; } @Override public VirtualMachineScaleSetImpl withNewDataDisk(int sizeInGB) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .implicitDisksToAssociate .add(new VirtualMachineScaleSetDataDisk().withLun(-1).withDiskSizeGB(sizeInGB)); return this; } @Override public VirtualMachineScaleSetImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .implicitDisksToAssociate .add(new VirtualMachineScaleSetDataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineScaleSetImpl withNewDataDisk( int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); VirtualMachineScaleSetManagedDiskParameters managedDiskParameters = new VirtualMachineScaleSetManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .implicitDisksToAssociate .add( new VirtualMachineScaleSetDataDisk() .withLun(lun) .withDiskSizeGB(sizeInGB) .withCaching(cachingType) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineScaleSetImpl withoutDataDisk(int lun) { if (!isManagedDiskEnabled()) { return this; } this.managedDataDisks.diskLunsToRemove.add(lun); return this; } /* TODO: Broken by change in Azure API behavior @Override public VirtualMachineScaleSetImpl withDataDiskUpdated(int lun, int newSizeInGB) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_NO_MANAGED_DISK_TO_UPDATE); VirtualMachineScaleSetDataDisk dataDisk = getDataDiskInner(lun); if (dataDisk == null) { throw new RuntimeException(String.format("A data disk with lun '%d' not found", lun)); } dataDisk .withDiskSizeGB(newSizeInGB); return this; } @Override public VirtualMachineScaleSetImpl withDataDiskUpdated(int lun, int newSizeInGB, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_NO_MANAGED_DISK_TO_UPDATE); VirtualMachineScaleSetDataDisk dataDisk = getDataDiskInner(lun); if (dataDisk == null) { throw new 
RuntimeException(String.format("A data disk with lun '%d' not found", lun)); } dataDisk .withDiskSizeGB(newSizeInGB) .withCaching(cachingType); return this; } @Override public VirtualMachineScaleSetImpl withDataDiskUpdated(int lun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VMSS_NO_MANAGED_DISK_TO_UPDATE); VirtualMachineScaleSetDataDisk dataDisk = getDataDiskInner(lun); if (dataDisk == null) { throw new RuntimeException(String.format("A data disk with lun '%d' not found", lun)); } dataDisk .withDiskSizeGB(newSizeInGB) .withCaching(cachingType) .managedDisk() .withStorageAccountType(storageAccountType); return this; } private VirtualMachineScaleSetDataDisk getDataDiskInner(int lun) { VirtualMachineScaleSetStorageProfile storageProfile = this .inner() .virtualMachineProfile() .storageProfile(); List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile .dataDisks(); if (dataDisks == null) { return null; } for (VirtualMachineScaleSetDataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { return dataDisk; } } return null; } */ @Override public VirtualMachineScaleSetImpl withNewDataDiskFromImage(int imageLun) { this.managedDataDisks.newDisksFromImage.add(new VirtualMachineScaleSetDataDisk().withLun(imageLun)); return this; } @Override public VirtualMachineScaleSetImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, CachingTypes cachingType) { this .managedDataDisks .newDisksFromImage .add( new VirtualMachineScaleSetDataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withCaching(cachingType)); return this; } @Override public VirtualMachineScaleSetImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) { VirtualMachineScaleSetManagedDiskParameters managedDiskParameters = new VirtualMachineScaleSetManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .newDisksFromImage .add( new VirtualMachineScaleSetDataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineScaleSetImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) { initVMProfileIfNecessary(); this .innerModel() .virtualMachineProfile() .storageProfile() .osDisk() .withManagedDisk(new VirtualMachineScaleSetManagedDiskParameters().withStorageAccountType(accountType)); return this; } @Override public VirtualMachineScaleSetImpl withDataDiskDefaultCachingType(CachingTypes cachingType) { this.managedDataDisks.setDefaultCachingType(cachingType); return this; } @Override public VirtualMachineScaleSetImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) { this.managedDataDisks.setDefaultStorageAccountType(storageAccountType); return this; } @Override public VirtualMachineScaleSetImpl withSystemAssignedManagedServiceIdentity() { this.virtualMachineScaleSetMsiHandler.withLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineScaleSetImpl withoutSystemAssignedManagedServiceIdentity() { this.virtualMachineScaleSetMsiHandler.withoutLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineScaleSetImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) { this.virtualMachineScaleSetMsiHandler.withAccessTo(resourceId, role); return this; } @Override public VirtualMachineScaleSetImpl 
withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole asRole) { this.virtualMachineScaleSetMsiHandler.withAccessToCurrentResourceGroup(asRole); return this; } @Override public VirtualMachineScaleSetImpl withSystemAssignedIdentityBasedAccessTo(String scope, String roleDefinitionId) { this.virtualMachineScaleSetMsiHandler.withAccessTo(scope, roleDefinitionId); return this; } @Override public VirtualMachineScaleSetImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup( String roleDefinitionId) { this.virtualMachineScaleSetMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId); return this; } @Override public VirtualMachineScaleSetImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) { this.virtualMachineScaleSetMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity); return this; } @Override public VirtualMachineScaleSetImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) { this.virtualMachineScaleSetMsiHandler.withExistingExternalManagedServiceIdentity(identity); return this; } @Override public VirtualMachineScaleSetImpl withoutUserAssignedManagedServiceIdentity(String identityId) { this.virtualMachineScaleSetMsiHandler.withoutExternalManagedServiceIdentity(identityId); return this; } @Override protected void beforeCreating() { setExtensions(); } @Override protected Mono<VirtualMachineScaleSetInner> createInner() { if (this.orchestrationMode() == OrchestrationMode.FLEXIBLE && this.innerModel().sku() == null) { return createInnerNoProfile(); } if (this.shouldSetProfileDefaults()) { this.setOSProfileDefaults(); this.setOSDiskDefault(); } this.setPrimaryIpConfigurationSubnet(); return this .setPrimaryIpConfigurationBackendsAndInboundNatPoolsAsync() .flatMap( virtualMachineScaleSet -> { if (isManagedDiskEnabled()) { this.managedDataDisks.setDataDisksDefaults(); } else { List<VirtualMachineScaleSetDataDisk> dataDisks = this.innerModel().virtualMachineProfile().storageProfile().dataDisks(); VirtualMachineScaleSetUnmanagedDataDiskImpl.setDataDisksDefaults(dataDisks, this.name()); } this.handleUnManagedOSDiskContainers(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.virtualMachineScaleSetMsiHandler.processCreatedExternalIdentities(); this.virtualMachineScaleSetMsiHandler.handleExternalIdentities(); this.createNewProximityPlacementGroup(); this.adjustProfileForFlexibleMode(); return this .manager() .serviceClient() .getVirtualMachineScaleSets() .createOrUpdateAsync(resourceGroupName(), name(), innerModel()); }); } @Override protected void afterCreating() { this.clearCachedProperties(); this.virtualMachineScaleSetMsiHandler.clear(); } @Override public Mono<VirtualMachineScaleSet> updateResourceAsync() { final VirtualMachineScaleSetImpl self = this; if (this.orchestrationMode() == OrchestrationMode.FLEXIBLE && this.innerModel().virtualMachineProfile() == null) { return updateResourceAsyncNoProfile(self); } setExtensions(); if (this.shouldSetProfileDefaults()) { this.setOSProfileDefaults(); this.setOSDiskDefault(); } this.setPrimaryIpConfigurationSubnet(); return this .setPrimaryIpConfigurationBackendsAndInboundNatPoolsAsync() .map( virtualMachineScaleSet -> { if (isManagedDiskEnabled()) { this.managedDataDisks.setDataDisksDefaults(); } else if (this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { List<VirtualMachineScaleSetDataDisk> dataDisks = this.innerModel().virtualMachineProfile().storageProfile().dataDisks(); 
VirtualMachineScaleSetUnmanagedDataDiskImpl.setDataDisksDefaults(dataDisks, this.name()); } this.handleUnManagedOSDiskContainers(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.virtualMachineScaleSetMsiHandler.processCreatedExternalIdentities(); this.adjustProfileForFlexibleMode(); VirtualMachineScaleSetUpdate updateParameter = VMSSPatchPayload.preparePatchPayload(this); this.virtualMachineScaleSetMsiHandler.handleExternalIdentities(updateParameter); return updateParameter; }) .flatMap( updateParameter -> this .manager() .serviceClient() .getVirtualMachineScaleSets() .updateAsync(resourceGroupName(), name(), updateParameter) .map( vmssInner -> { setInner(vmssInner); self.clearCachedProperties(); self.initializeChildrenFromInner(); self.virtualMachineScaleSetMsiHandler.clear(); return self; })); } @Override public Mono<VirtualMachineScaleSet> refreshAsync() { return super .refreshAsync() .map( scaleSet -> { VirtualMachineScaleSetImpl impl = (VirtualMachineScaleSetImpl) scaleSet; impl.clearCachedProperties(); impl.initializeChildrenFromInner(); return impl; }); } @Override protected Mono<VirtualMachineScaleSetInner> getInnerAsync() { return this .manager() .serviceClient() .getVirtualMachineScaleSets() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private void adjustProfileForFlexibleMode() { if (this.orchestrationMode() == OrchestrationMode.FLEXIBLE) { if (this.innerModel().virtualMachineProfile().networkProfile().networkInterfaceConfigurations() != null) { this.innerModel().virtualMachineProfile().networkProfile().networkInterfaceConfigurations().forEach(virtualMachineScaleSetNetworkConfiguration -> { if (virtualMachineScaleSetNetworkConfiguration.ipConfigurations() != null) { virtualMachineScaleSetNetworkConfiguration.ipConfigurations().forEach(virtualMachineScaleSetIpConfiguration -> { virtualMachineScaleSetIpConfiguration.withLoadBalancerInboundNatPools(null); }); } }); } this.innerModel() .withUpgradePolicy(null) .virtualMachineProfile().networkProfile() .withNetworkApiVersion(NetworkApiVersion.TWO_ZERO_TWO_ZERO_ONE_ONE_ZERO_ONE); } } private Mono<VirtualMachineScaleSetInner> createInnerNoProfile() { this.innerModel().withVirtualMachineProfile(null); return manager() .serviceClient() .getVirtualMachineScaleSets() .createOrUpdateAsync(resourceGroupName(), name(), innerModel()); } private Mono<VirtualMachineScaleSet> updateResourceAsyncNoProfile(VirtualMachineScaleSetImpl self) { return manager() .serviceClient() .getVirtualMachineScaleSets() .updateAsync(resourceGroupName(), name(), VMSSPatchPayload.preparePatchPayload(this)) .map( vmssInner -> { setInner(vmssInner); self.clearCachedProperties(); self.initializeChildrenFromInner(); self.virtualMachineScaleSetMsiHandler.clear(); return self; }); } private void initVMProfileIfNecessary() { if (this.innerModel().virtualMachineProfile() == null) { this.innerModel().withVirtualMachineProfile(initDefaultVMProfile()); this.profileAttached = true; } } private VirtualMachineScaleSetVMProfile initDefaultVMProfile() { VirtualMachineScaleSetImpl impl = (VirtualMachineScaleSetImpl) this.manager() .virtualMachineScaleSets() .define(this.name()); if (this.orchestrationMode() == OrchestrationMode.FLEXIBLE) { if (this.innerModel().platformFaultDomainCount() != null) { impl.withFlexibleOrchestrationMode(this.innerModel().platformFaultDomainCount()); } else { impl.withFlexibleOrchestrationMode(); } } return impl.innerModel().virtualMachineProfile(); } private boolean isInUpdateMode() { return !this.isInCreateMode(); } 
private void setOSProfileDefaults() { if (this.innerModel().sku().capacity() == null) { this.withCapacity(2); } if (this.innerModel().upgradePolicy() == null || this.innerModel().upgradePolicy().mode() == null) { this.innerModel().withUpgradePolicy(new UpgradePolicy().withMode(UpgradeMode.AUTOMATIC)); } VirtualMachineScaleSetOSProfile osProfile = this.innerModel().virtualMachineProfile().osProfile(); VirtualMachineScaleSetOSDisk osDisk = this.innerModel().virtualMachineProfile().storageProfile().osDisk(); if (!removeOsProfile && isOSDiskFromImage(osDisk)) { if (this.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) { if (osProfile.linuxConfiguration() == null) { osProfile.withLinuxConfiguration(new LinuxConfiguration()); } osProfile.linuxConfiguration().withDisablePasswordAuthentication(osProfile.adminPassword() == null); } if (this.computerNamePrefix() == null) { if (this.name().matches("[0-9]+")) { withComputerNamePrefix(this.namer.getRandomName("vmss-vm", 12)); } else if (this.name().length() <= 12) { withComputerNamePrefix(this.name() + "-vm"); } else { withComputerNamePrefix(this.namer.getRandomName("vmss-vm", 12)); } } } else { this.innerModel().virtualMachineProfile().withOsProfile(null); } } private void setOSDiskDefault() { VirtualMachineScaleSetStorageProfile storageProfile = this.innerModel().virtualMachineProfile().storageProfile(); VirtualMachineScaleSetOSDisk osDisk = storageProfile.osDisk(); if (isOSDiskFromImage(osDisk)) { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() == null) { osDisk.withManagedDisk(new VirtualMachineScaleSetManagedDiskParameters()); } if (osDisk.managedDisk().storageAccountType() == null) { osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS); } osDisk.withVhdContainers(null); } else { osDisk.withManagedDisk(null); if (osDisk.name() == null) { withOSDiskName(this.name() + "-os-disk"); } } } if (this.osDiskCachingType() == null) { withOSDiskCaching(CachingTypes.READ_WRITE); } } /* * Profile defaults should be set when: * 1. creating vmss * 2. 
attaching a profile to existing flexible vmss * @return */ private boolean shouldSetProfileDefaults() { return isInCreateMode() || (this.orchestrationMode() == OrchestrationMode.FLEXIBLE && this.profileAttached); } private void setExtensions() { if (this.extensions.size() > 0 && this.innerModel() != null && this.innerModel().virtualMachineProfile() != null) { this .innerModel() .virtualMachineProfile() .withExtensionProfile(new VirtualMachineScaleSetExtensionProfile()) .extensionProfile() .withExtensions(innersFromWrappers(this.extensions.values())); } } @Override public void beforeGroupCreateOrUpdate() { this.prepareOSDiskContainers(); this.bootDiagnosticsHandler.prepare(); } protected void prepareOSDiskContainers() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null || isManagedDiskEnabled()) { return; } final VirtualMachineScaleSetStorageProfile storageProfile = innerModel().virtualMachineProfile().storageProfile(); if (isOSDiskFromStoredImage(storageProfile)) { return; } if (this.isInCreateMode() && this.creatableStorageAccountKeys.isEmpty() && this.existingStorageAccountsToAssociate.isEmpty()) { String accountName = this.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.creatableGroup != null) { storageAccountCreatable = this .storageManager .storageAccounts() .define(accountName) .withRegion(this.regionName()) .withNewResourceGroup(this.creatableGroup); } else { storageAccountCreatable = this .storageManager .storageAccounts() .define(accountName) .withRegion(this.regionName()) .withExistingResourceGroup(this.resourceGroupName()); } this.creatableStorageAccountKeys.add(this.addDependency(storageAccountCreatable)); } } private void handleUnManagedOSDiskContainers() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null) { return; } final VirtualMachineScaleSetStorageProfile storageProfile = innerModel().virtualMachineProfile().storageProfile(); if (isManagedDiskEnabled()) { storageProfile.osDisk().withVhdContainers(null); return; } if (isOSDiskFromStoredImage(storageProfile)) { storageProfile.osDisk().vhdContainers().clear(); return; } String containerName = null; for (String containerUrl : storageProfile.osDisk().vhdContainers()) { containerName = containerUrl.substring(containerUrl.lastIndexOf("/") + 1); break; } if (containerName == null) { containerName = "vhds"; } if (isInCreateMode() && this.creatableStorageAccountKeys.isEmpty() && this.existingStorageAccountsToAssociate.isEmpty()) { throw logger .logExceptionAsError( new IllegalStateException("Expected storage account(s) for VMSS OS disk containers not found")); } for (String storageAccountKey : this.creatableStorageAccountKeys) { StorageAccount storageAccount = this.<StorageAccount>taskResult(storageAccountKey); storageProfile .osDisk() .vhdContainers() .add(mergePath(storageAccount.endPoints().primary().blob(), containerName)); } for (StorageAccount storageAccount : this.existingStorageAccountsToAssociate) { storageProfile .osDisk() .vhdContainers() .add(mergePath(storageAccount.endPoints().primary().blob(), containerName)); } this.creatableStorageAccountKeys.clear(); this.existingStorageAccountsToAssociate.clear(); } private void setPrimaryIpConfigurationSubnet() { if (isInUpdateMode()) { return; } initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration ipConfig = this.primaryNicDefaultIpConfiguration(); ipConfig.withSubnet(new 
ApiEntityReference().withId(this.existingPrimaryNetworkSubnetNameToAssociate)); this.existingPrimaryNetworkSubnetNameToAssociate = null; } private Mono<VirtualMachineScaleSetImpl> setPrimaryIpConfigurationBackendsAndInboundNatPoolsAsync() { if (isInCreateMode()) { return Mono.just(this); } try { return this .loadCurrentPrimaryLoadBalancersIfAvailableAsync() .map( virtualMachineScaleSet -> { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration primaryIpConfig = primaryNicDefaultIpConfiguration(); if (this.primaryInternetFacingLoadBalancer != null) { removeBackendsFromIpConfiguration( this.primaryInternetFacingLoadBalancer.id(), primaryIpConfig, this.primaryInternetFacingLBBackendsToRemoveOnUpdate.toArray(new String[0])); associateBackEndsToIpConfiguration( primaryInternetFacingLoadBalancer.id(), primaryIpConfig, this.primaryInternetFacingLBBackendsToAddOnUpdate.toArray(new String[0])); removeInboundNatPoolsFromIpConfiguration( this.primaryInternetFacingLoadBalancer.id(), primaryIpConfig, this.primaryInternetFacingLBInboundNatPoolsToRemoveOnUpdate.toArray(new String[0])); associateInboundNATPoolsToIpConfiguration( primaryInternetFacingLoadBalancer.id(), primaryIpConfig, this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate.toArray(new String[0])); } if (this.primaryInternalLoadBalancer != null) { removeBackendsFromIpConfiguration( this.primaryInternalLoadBalancer.id(), primaryIpConfig, this.primaryInternalLBBackendsToRemoveOnUpdate.toArray(new String[0])); associateBackEndsToIpConfiguration( primaryInternalLoadBalancer.id(), primaryIpConfig, this.primaryInternalLBBackendsToAddOnUpdate.toArray(new String[0])); removeInboundNatPoolsFromIpConfiguration( this.primaryInternalLoadBalancer.id(), primaryIpConfig, this.primaryInternalLBInboundNatPoolsToRemoveOnUpdate.toArray(new String[0])); associateInboundNATPoolsToIpConfiguration( primaryInternalLoadBalancer.id(), primaryIpConfig, this.primaryInternalLBInboundNatPoolsToAddOnUpdate.toArray(new String[0])); } if (this.removePrimaryInternetFacingLoadBalancerOnUpdate) { if (this.primaryInternetFacingLoadBalancer != null) { removeLoadBalancerAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancer, primaryIpConfig); } } if (this.removePrimaryInternalLoadBalancerOnUpdate) { if (this.primaryInternalLoadBalancer != null) { removeLoadBalancerAssociationFromIpConfiguration( this.primaryInternalLoadBalancer, primaryIpConfig); } } if (this.primaryInternetFacingLoadBalancerToAttachOnUpdate != null) { if (this.primaryInternetFacingLoadBalancer != null) { removeLoadBalancerAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancer, primaryIpConfig); } associateLoadBalancerToIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate, primaryIpConfig); if (!this.primaryInternetFacingLBBackendsToAddOnUpdate.isEmpty()) { removeAllBackendAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate, primaryIpConfig); associateBackEndsToIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate.id(), primaryIpConfig, this.primaryInternetFacingLBBackendsToAddOnUpdate.toArray(new String[0])); } if (!this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate.isEmpty()) { removeAllInboundNatPoolAssociationFromIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate, primaryIpConfig); associateInboundNATPoolsToIpConfiguration( this.primaryInternetFacingLoadBalancerToAttachOnUpdate.id(), primaryIpConfig, 
this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate.toArray(new String[0])); } } if (this.primaryInternalLoadBalancerToAttachOnUpdate != null) { if (this.primaryInternalLoadBalancer != null) { removeLoadBalancerAssociationFromIpConfiguration( this.primaryInternalLoadBalancer, primaryIpConfig); } associateLoadBalancerToIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate, primaryIpConfig); if (!this.primaryInternalLBBackendsToAddOnUpdate.isEmpty()) { removeAllBackendAssociationFromIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate, primaryIpConfig); associateBackEndsToIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate.id(), primaryIpConfig, this.primaryInternalLBBackendsToAddOnUpdate.toArray(new String[0])); } if (!this.primaryInternalLBInboundNatPoolsToAddOnUpdate.isEmpty()) { removeAllInboundNatPoolAssociationFromIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate, primaryIpConfig); associateInboundNATPoolsToIpConfiguration( this.primaryInternalLoadBalancerToAttachOnUpdate.id(), primaryIpConfig, this.primaryInternalLBInboundNatPoolsToAddOnUpdate.toArray(new String[0])); } } this.removePrimaryInternetFacingLoadBalancerOnUpdate = false; this.removePrimaryInternalLoadBalancerOnUpdate = false; this.primaryInternetFacingLoadBalancerToAttachOnUpdate = null; this.primaryInternalLoadBalancerToAttachOnUpdate = null; this.primaryInternetFacingLBBackendsToRemoveOnUpdate.clear(); this.primaryInternetFacingLBInboundNatPoolsToRemoveOnUpdate.clear(); this.primaryInternalLBBackendsToRemoveOnUpdate.clear(); this.primaryInternalLBInboundNatPoolsToRemoveOnUpdate.clear(); this.primaryInternetFacingLBBackendsToAddOnUpdate.clear(); this.primaryInternetFacingLBInboundNatPoolsToAddOnUpdate.clear(); this.primaryInternalLBBackendsToAddOnUpdate.clear(); this.primaryInternalLBInboundNatPoolsToAddOnUpdate.clear(); return this; }); } catch (IOException ioException) { throw logger.logExceptionAsError(new RuntimeException(ioException)); } } private void clearCachedProperties() { this.primaryInternetFacingLoadBalancer = null; this.primaryInternalLoadBalancer = null; this.profileAttached = false; } private Mono<VirtualMachineScaleSetImpl> loadCurrentPrimaryLoadBalancersIfAvailableAsync() throws IOException { Mono<VirtualMachineScaleSetImpl> self = Mono.just(this); if (this.primaryInternetFacingLoadBalancer != null && this.primaryInternalLoadBalancer != null) { return self; } String firstLoadBalancerId = null; VirtualMachineScaleSetIpConfiguration ipConfig = primaryNicDefaultIpConfiguration(); if (ipConfig == null) { return self; } if (!ipConfig.loadBalancerBackendAddressPools().isEmpty()) { firstLoadBalancerId = ResourceUtils.parentResourceIdFromResourceId(ipConfig.loadBalancerBackendAddressPools().get(0).id()); } if (firstLoadBalancerId == null && !ipConfig.loadBalancerInboundNatPools().isEmpty()) { firstLoadBalancerId = ResourceUtils.parentResourceIdFromResourceId(ipConfig.loadBalancerInboundNatPools().get(0).id()); } if (firstLoadBalancerId == null) { return self; } self = self .concatWith( Mono .just(firstLoadBalancerId) .flatMap( id -> this .networkManager .loadBalancers() .getByIdAsync(id) .map( loadBalancer1 -> { if (loadBalancer1.publicIpAddressIds() != null && loadBalancer1.publicIpAddressIds().size() > 0) { this.primaryInternetFacingLoadBalancer = loadBalancer1; } else { this.primaryInternalLoadBalancer = loadBalancer1; } return this; }))) .last(); String secondLoadBalancerId = null; for (SubResource subResource : 
ipConfig.loadBalancerBackendAddressPools()) { if (!subResource.id().toLowerCase(Locale.ROOT).startsWith(firstLoadBalancerId.toLowerCase(Locale.ROOT))) { secondLoadBalancerId = ResourceUtils.parentResourceIdFromResourceId(subResource.id()); break; } } if (secondLoadBalancerId == null) { for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (!subResource .id() .toLowerCase(Locale.ROOT) .startsWith(firstLoadBalancerId.toLowerCase(Locale.ROOT))) { secondLoadBalancerId = ResourceUtils.parentResourceIdFromResourceId(subResource.id()); break; } } } if (secondLoadBalancerId == null) { return self; } return self .concatWith( Mono .just(secondLoadBalancerId) .flatMap( id -> networkManager .loadBalancers() .getByIdAsync(id) .map( loadBalancer2 -> { if (loadBalancer2.publicIpAddressIds() != null && loadBalancer2.publicIpAddressIds().size() > 0) { this.primaryInternetFacingLoadBalancer = loadBalancer2; } else { this.primaryInternalLoadBalancer = loadBalancer2; } return this; }))) .last(); } private VirtualMachineScaleSetIpConfiguration primaryNicDefaultIpConfiguration() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null) { return null; } List<VirtualMachineScaleSetNetworkConfiguration> nicConfigurations = this.innerModel().virtualMachineProfile().networkProfile().networkInterfaceConfigurations(); for (VirtualMachineScaleSetNetworkConfiguration nicConfiguration : nicConfigurations) { if (nicConfiguration.primary()) { if (nicConfiguration.ipConfigurations().size() > 0) { VirtualMachineScaleSetIpConfiguration ipConfig = nicConfiguration.ipConfigurations().get(0); if (ipConfig.loadBalancerBackendAddressPools() == null) { ipConfig.withLoadBalancerBackendAddressPools(new ArrayList<>()); } if (ipConfig.loadBalancerInboundNatPools() == null) { ipConfig.withLoadBalancerInboundNatPools(new ArrayList<>()); } return ipConfig; } } } throw logger .logExceptionAsError( new RuntimeException("Could not find the primary nic configuration or an IP configuration in it")); } private VirtualMachineScaleSetNetworkConfiguration primaryNicConfiguration() { if (this.innerModel() == null || this.innerModel().virtualMachineProfile() == null) { return null; } List<VirtualMachineScaleSetNetworkConfiguration> nicConfigurations = this.innerModel().virtualMachineProfile().networkProfile().networkInterfaceConfigurations(); for (VirtualMachineScaleSetNetworkConfiguration nicConfiguration : nicConfigurations) { if (nicConfiguration.primary()) { return nicConfiguration; } } throw logger.logExceptionAsError(new RuntimeException("Could not find the primary nic configuration")); } private static void associateBackEndsToIpConfiguration( String loadBalancerId, VirtualMachineScaleSetIpConfiguration ipConfig, String... backendNames) { List<SubResource> backendSubResourcesToAssociate = new ArrayList<>(); for (String backendName : backendNames) { String backendPoolId = mergePath(loadBalancerId, "backendAddressPools", backendName); boolean found = false; for (SubResource subResource : ipConfig.loadBalancerBackendAddressPools()) { if (subResource.id().equalsIgnoreCase(backendPoolId)) { found = true; break; } } if (!found) { backendSubResourcesToAssociate.add(new SubResource().withId(backendPoolId)); } } for (SubResource backendSubResource : backendSubResourcesToAssociate) { ipConfig.loadBalancerBackendAddressPools().add(backendSubResource); } } private static void associateInboundNATPoolsToIpConfiguration( String loadBalancerId, VirtualMachineScaleSetIpConfiguration ipConfig, String... 
inboundNatPools) { List<SubResource> inboundNatPoolSubResourcesToAssociate = new ArrayList<>(); for (String inboundNatPool : inboundNatPools) { String inboundNatPoolId = mergePath(loadBalancerId, "inboundNatPools", inboundNatPool); boolean found = false; for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (subResource.id().equalsIgnoreCase(inboundNatPoolId)) { found = true; break; } } if (!found) { inboundNatPoolSubResourcesToAssociate.add(new SubResource().withId(inboundNatPoolId)); } } for (SubResource backendSubResource : inboundNatPoolSubResourcesToAssociate) { ipConfig.loadBalancerInboundNatPools().add(backendSubResource); } } private static Map<String, LoadBalancerBackend> getBackendsAssociatedWithIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { String loadBalancerId = loadBalancer.id(); Map<String, LoadBalancerBackend> attachedBackends = new HashMap<>(); Map<String, LoadBalancerBackend> lbBackends = loadBalancer.backends(); for (LoadBalancerBackend lbBackend : lbBackends.values()) { String backendId = mergePath(loadBalancerId, "backendAddressPools", lbBackend.name()); for (SubResource subResource : ipConfig.loadBalancerBackendAddressPools()) { if (subResource.id().equalsIgnoreCase(backendId)) { attachedBackends.put(lbBackend.name(), lbBackend); } } } return attachedBackends; } private static Map<String, LoadBalancerInboundNatPool> getInboundNatPoolsAssociatedWithIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { String loadBalancerId = loadBalancer.id(); Map<String, LoadBalancerInboundNatPool> attachedInboundNatPools = new HashMap<>(); Map<String, LoadBalancerInboundNatPool> lbInboundNatPools = loadBalancer.inboundNatPools(); for (LoadBalancerInboundNatPool lbInboundNatPool : lbInboundNatPools.values()) { String inboundNatPoolId = mergePath(loadBalancerId, "inboundNatPools", lbInboundNatPool.name()); for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (subResource.id().equalsIgnoreCase(inboundNatPoolId)) { attachedInboundNatPools.put(lbInboundNatPool.name(), lbInboundNatPool); } } } return attachedInboundNatPools; } private static void associateLoadBalancerToIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { Collection<LoadBalancerBackend> backends = loadBalancer.backends().values(); String[] backendNames = new String[backends.size()]; int i = 0; for (LoadBalancerBackend backend : backends) { backendNames[i] = backend.name(); i++; } associateBackEndsToIpConfiguration(loadBalancer.id(), ipConfig, backendNames); Collection<LoadBalancerInboundNatPool> inboundNatPools = loadBalancer.inboundNatPools().values(); String[] natPoolNames = new String[inboundNatPools.size()]; i = 0; for (LoadBalancerInboundNatPool inboundNatPool : inboundNatPools) { natPoolNames[i] = inboundNatPool.name(); i++; } associateInboundNATPoolsToIpConfiguration(loadBalancer.id(), ipConfig, natPoolNames); } private static void removeLoadBalancerAssociationFromIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { removeAllBackendAssociationFromIpConfiguration(loadBalancer, ipConfig); removeAllInboundNatPoolAssociationFromIpConfiguration(loadBalancer, ipConfig); } private static void removeAllBackendAssociationFromIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { List<SubResource> toRemove = new ArrayList<>(); for (SubResource subResource : 
ipConfig.loadBalancerBackendAddressPools()) { if (subResource .id() .toLowerCase(Locale.ROOT) .startsWith(loadBalancer.id().toLowerCase(Locale.ROOT) + "/")) { toRemove.add(subResource); } } for (SubResource subResource : toRemove) { ipConfig.loadBalancerBackendAddressPools().remove(subResource); } } private static void removeAllInboundNatPoolAssociationFromIpConfiguration( LoadBalancer loadBalancer, VirtualMachineScaleSetIpConfiguration ipConfig) { List<SubResource> toRemove = new ArrayList<>(); for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (subResource .id() .toLowerCase(Locale.ROOT) .startsWith(loadBalancer.id().toLowerCase(Locale.ROOT) + "/")) { toRemove.add(subResource); } } for (SubResource subResource : toRemove) { ipConfig.loadBalancerInboundNatPools().remove(subResource); } } private static void removeBackendsFromIpConfiguration( String loadBalancerId, VirtualMachineScaleSetIpConfiguration ipConfig, String... backendNames) { List<SubResource> toRemove = new ArrayList<>(); for (String backendName : backendNames) { String backendPoolId = mergePath(loadBalancerId, "backendAddressPools", backendName); for (SubResource subResource : ipConfig.loadBalancerBackendAddressPools()) { if (subResource.id().equalsIgnoreCase(backendPoolId)) { toRemove.add(subResource); break; } } } for (SubResource subResource : toRemove) { ipConfig.loadBalancerBackendAddressPools().remove(subResource); } } private static void removeInboundNatPoolsFromIpConfiguration( String loadBalancerId, VirtualMachineScaleSetIpConfiguration ipConfig, String... inboundNatPoolNames) { List<SubResource> toRemove = new ArrayList<>(); for (String natPoolName : inboundNatPoolNames) { String inboundNatPoolId = mergePath(loadBalancerId, "inboundNatPools", natPoolName); for (SubResource subResource : ipConfig.loadBalancerInboundNatPools()) { if (subResource.id().equalsIgnoreCase(inboundNatPoolId)) { toRemove.add(subResource); break; } } } for (SubResource subResource : toRemove) { ipConfig.loadBalancerInboundNatPools().remove(subResource); } } private static <T> void addToList(List<T> list, T[] items) { list.addAll(Arrays.asList(items)); } private static String mergePath(String... 
segments) { StringBuilder builder = new StringBuilder(); for (String segment : segments) { while (segment.length() > 1 && segment.endsWith("/")) { segment = segment.substring(0, segment.length() - 1); } if (segment.length() > 0) { builder.append(segment); builder.append("/"); } } String merged = builder.toString(); if (merged.endsWith("/")) { merged = merged.substring(0, merged.length() - 1); } return merged; } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } protected VirtualMachineScaleSetImpl withUnmanagedDataDisk( VirtualMachineScaleSetUnmanagedDataDiskImpl unmanagedDisk) { initVMProfileIfNecessary(); if (this.innerModel().virtualMachineProfile().storageProfile().dataDisks() == null) { this .innerModel() .virtualMachineProfile() .storageProfile() .withDataDisks(new ArrayList<VirtualMachineScaleSetDataDisk>()); } List<VirtualMachineScaleSetDataDisk> dataDisks = this.innerModel().virtualMachineProfile().storageProfile().dataDisks(); dataDisks.add(unmanagedDisk.innerModel()); return this; } @Override public VirtualMachineScaleSetImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<>()); } this.innerModel().zones().add(zoneId.toString()); return this; } /** * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]). * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is configured to use image from PIR or custom image */ private boolean isOSDiskFromImage(VirtualMachineScaleSetOSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE; } /** * Checks whether the OS disk is based on a CustomImage. * * <p>A custom image is represented by {@link VirtualMachineCustomImage}. * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on custom image. */ private boolean isOsDiskFromCustomImage(VirtualMachineScaleSetStorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null; } /** * Checks whether the OS disk is based on an platform image (image in PIR). * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on platform image. */ private boolean isOSDiskFromPlatformImage(VirtualMachineScaleSetStorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.publisher() != null && imageReference.offer() != null && imageReference.sku() != null && imageReference.version() != null; } /** * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature'). 
* * @param storageProfile the storage profile * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature') */ private boolean isOSDiskFromStoredImage(VirtualMachineScaleSetStorageProfile storageProfile) { VirtualMachineScaleSetOSDisk osDisk = storageProfile.osDisk(); return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null; } private void throwIfManagedDiskDisabled(String message) { if (!this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } @Override public VirtualMachineScaleSetImpl withBootDiagnosticsOnManagedStorageAccount() { this.bootDiagnosticsHandler.withBootDiagnostics(true); return this; } @Override public VirtualMachineScaleSetImpl withBootDiagnostics() { this.bootDiagnosticsHandler.withBootDiagnostics(false); return this; } @Override public VirtualMachineScaleSetImpl withBootDiagnostics(Creatable<StorageAccount> creatable) { this.bootDiagnosticsHandler.withBootDiagnostics(creatable); return this; } @Override public VirtualMachineScaleSetImpl withBootDiagnostics(StorageAccount storageAccount) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount); return this; } @Override public VirtualMachineScaleSetImpl withBootDiagnostics(String storageAccountBlobEndpointUri) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri); return this; } @Override public VirtualMachineScaleSetImpl withoutBootDiagnostics() { this.bootDiagnosticsHandler.withoutBootDiagnostics(); return this; } @Override public VirtualMachineScaleSetImpl withMaxPrice(Double maxPrice) { this.innerModel().virtualMachineProfile().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice)); return this; } @Override public VirtualMachineScaleSetImpl withVirtualMachinePriority(VirtualMachinePriorityTypes priority) { this.innerModel().virtualMachineProfile().withPriority(priority); return this; } @Override public VirtualMachineScaleSetImpl withLowPriorityVirtualMachine() { this.withVirtualMachinePriority(VirtualMachinePriorityTypes.LOW); return this; } @Override public VirtualMachineScaleSetImpl withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes policy) { this.withLowPriorityVirtualMachine(); this.innerModel().virtualMachineProfile().withEvictionPolicy(policy); return this; } @Override public VirtualMachineScaleSetImpl withSpotPriorityVirtualMachine() { this.withVirtualMachinePriority(VirtualMachinePriorityTypes.SPOT); return this; } @Override public VirtualMachineScaleSetImpl withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes policy) { this.withSpotPriorityVirtualMachine(); this.innerModel().virtualMachineProfile().withEvictionPolicy(policy); return this; } @Override public VirtualMachineScaleSetImpl withVirtualMachinePublicIp() { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); if (nicIpConfig.publicIpAddressConfiguration() != null) { return this; } else { VirtualMachineScaleSetPublicIpAddressConfiguration pipConfig = new VirtualMachineScaleSetPublicIpAddressConfiguration(); pipConfig.withName("pip1"); pipConfig.withIdleTimeoutInMinutes(15); nicIpConfig.withPublicIpAddressConfiguration(pipConfig); return this; } } @Override public VirtualMachineScaleSetImpl withVirtualMachinePublicIp(String leafDomainLabel) { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); if 
(nicIpConfig.publicIpAddressConfiguration() != null) { if (nicIpConfig.publicIpAddressConfiguration().dnsSettings() != null) { nicIpConfig.publicIpAddressConfiguration().dnsSettings().withDomainNameLabel(leafDomainLabel); } else { nicIpConfig .publicIpAddressConfiguration() .withDnsSettings(new VirtualMachineScaleSetPublicIpAddressConfigurationDnsSettings()); nicIpConfig.publicIpAddressConfiguration().dnsSettings().withDomainNameLabel(leafDomainLabel); } } else { VirtualMachineScaleSetPublicIpAddressConfiguration pipConfig = new VirtualMachineScaleSetPublicIpAddressConfiguration(); pipConfig.withName("pip1"); pipConfig.withIdleTimeoutInMinutes(15); pipConfig.withDnsSettings(new VirtualMachineScaleSetPublicIpAddressConfigurationDnsSettings()); pipConfig.dnsSettings().withDomainNameLabel(leafDomainLabel); nicIpConfig.withPublicIpAddressConfiguration(pipConfig); } return this; } @Override public VirtualMachineScaleSetImpl withVirtualMachinePublicIp( VirtualMachineScaleSetPublicIpAddressConfiguration pipConfig) { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = this.primaryNicDefaultIpConfiguration(); nicIpConfig.withPublicIpAddressConfiguration(pipConfig); return this; } @Override public VirtualMachineScaleSetImpl withAcceleratedNetworking() { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withEnableAcceleratedNetworking(true); return this; } @Override public VirtualMachineScaleSetImpl withoutAcceleratedNetworking() { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withEnableAcceleratedNetworking(false); return this; } @Override public VirtualMachineScaleSetImpl withIpForwarding() { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withEnableIpForwarding(true); return this; } @Override public VirtualMachineScaleSetImpl withoutIpForwarding() { VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); if (nicConfig == null) { return this; } nicConfig.withEnableIpForwarding(false); return this; } @Override public VirtualMachineScaleSetImpl withExistingNetworkSecurityGroup(NetworkSecurityGroup networkSecurityGroup) { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withNetworkSecurityGroup(new SubResource().withId(networkSecurityGroup.id())); return this; } @Override public VirtualMachineScaleSetImpl withExistingNetworkSecurityGroupId(String networkSecurityGroupId) { initVMProfileIfNecessary(); VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); nicConfig.withNetworkSecurityGroup(new SubResource().withId(networkSecurityGroupId)); return this; } @Override public VirtualMachineScaleSetImpl withoutNetworkSecurityGroup() { VirtualMachineScaleSetNetworkConfiguration nicConfig = this.primaryNicConfiguration(); if (nicConfig == null) { return this; } nicConfig.withNetworkSecurityGroup(null); return this; } @Override public VirtualMachineScaleSetImpl withSinglePlacementGroup() { this.innerModel().withSinglePlacementGroup(true); return this; } @Override public VirtualMachineScaleSetImpl withoutSinglePlacementGroup() { this.innerModel().withSinglePlacementGroup(false); return this; } @Override public VirtualMachineScaleSetImpl withExistingApplicationGatewayBackendPool(String backendPoolId) { 
initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = primaryNicDefaultIpConfiguration(); if (nicIpConfig.applicationGatewayBackendAddressPools() == null) { nicIpConfig.withApplicationGatewayBackendAddressPools(new ArrayList<>()); } boolean found = false; for (SubResource backendPool : nicIpConfig.applicationGatewayBackendAddressPools()) { if (backendPool.id().equalsIgnoreCase(backendPoolId)) { found = true; break; } } if (!found) { nicIpConfig.applicationGatewayBackendAddressPools().add(new SubResource().withId(backendPoolId)); } return this; } @Override public VirtualMachineScaleSetImpl withoutApplicationGatewayBackendPool(String backendPoolId) { VirtualMachineScaleSetIpConfiguration nicIpConfig = primaryNicDefaultIpConfiguration(); if (nicIpConfig == null || nicIpConfig.applicationGatewayBackendAddressPools() == null) { return this; } else { int foundIndex = -1; int index = -1; for (SubResource backendPool : nicIpConfig.applicationGatewayBackendAddressPools()) { index = index + 1; if (backendPool.id().equalsIgnoreCase(backendPoolId)) { foundIndex = index; break; } } if (foundIndex != -1) { nicIpConfig.applicationGatewayBackendAddressPools().remove(foundIndex); } return this; } } @Override public VirtualMachineScaleSetImpl withExistingApplicationSecurityGroup( ApplicationSecurityGroup applicationSecurityGroup) { return withExistingApplicationSecurityGroupId(applicationSecurityGroup.id()); } @Override public VirtualMachineScaleSetImpl withExistingApplicationSecurityGroupId(String applicationSecurityGroupId) { initVMProfileIfNecessary(); VirtualMachineScaleSetIpConfiguration nicIpConfig = primaryNicDefaultIpConfiguration(); if (nicIpConfig.applicationSecurityGroups() == null) { nicIpConfig.withApplicationSecurityGroups(new ArrayList<>()); } boolean found = false; for (SubResource asg : nicIpConfig.applicationSecurityGroups()) { if (asg.id().equalsIgnoreCase(applicationSecurityGroupId)) { found = true; break; } } if (!found) { nicIpConfig.applicationSecurityGroups().add(new SubResource().withId(applicationSecurityGroupId)); } return this; } @Override public VirtualMachineScaleSetImpl withoutApplicationSecurityGroup(String applicationSecurityGroupId) { VirtualMachineScaleSetIpConfiguration nicIpConfig = primaryNicDefaultIpConfiguration(); if (nicIpConfig == null || nicIpConfig.applicationSecurityGroups() == null) { return this; } else { int foundIndex = -1; int index = -1; for (SubResource asg : nicIpConfig.applicationSecurityGroups()) { index = index + 1; if (asg.id().equalsIgnoreCase(applicationSecurityGroupId)) { foundIndex = index; break; } } if (foundIndex != -1) { nicIpConfig.applicationSecurityGroups().remove(foundIndex); } return this; } } @Override public VirtualMachineScaleSetImpl withProximityPlacementGroup(String proximityPlacementGroupId) { this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId)); this.newProximityPlacementGroupName = null; return this; } @Override public VirtualMachineScaleSetImpl withNewProximityPlacementGroup( String proximityPlacementGroupName, ProximityPlacementGroupType type) { this.newProximityPlacementGroupName = proximityPlacementGroupName; this.newProximityPlacementGroupType = type; this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineScaleSetImpl withDoNotRunExtensionsOnOverprovisionedVMs( Boolean doNotRunExtensionsOnOverprovisionedVMs) { this.innerModel().withDoNotRunExtensionsOnOverprovisionedVMs(doNotRunExtensionsOnOverprovisionedVMs); 
return this; } @Override public VirtualMachineScaleSetImpl withAdditionalCapabilities(AdditionalCapabilities additionalCapabilities) { this.innerModel().withAdditionalCapabilities(additionalCapabilities); return this; } private void createNewProximityPlacementGroup() { if (isInCreateMode()) { if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) { ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner(); plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType); plgInner.withLocation(this.innerModel().location()); plgInner = this .manager() .serviceClient() .getProximityPlacementGroups() .createOrUpdate(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner); this.innerModel().withProximityPlacementGroup((new SubResource().withId(plgInner.id()))); } } } @Override public VirtualMachineScaleSetImpl withPlan(PurchasePlan plan) { this.innerModel().withPlan(new Plan()); this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name()); return this; } /** * Class to manage Data Disk collection. */ private class ManagedDataDiskCollection { private final List<VirtualMachineScaleSetDataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<VirtualMachineScaleSetDataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineScaleSetImpl vmss; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; ManagedDataDiskCollection(VirtualMachineScaleSetImpl vmss) { this.vmss = vmss; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDataDisksDefaults() { if (this.vmss.innerModel() == null || this.vmss.innerModel().virtualMachineProfile() == null) { return; } VirtualMachineScaleSetStorageProfile storageProfile = this.vmss.innerModel().virtualMachineProfile().storageProfile(); if (isPending()) { if (storageProfile.dataDisks() == null) { storageProfile.withDataDisks(new ArrayList<>()); } List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile.dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (VirtualMachineScaleSetDataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (VirtualMachineScaleSetDataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (VirtualMachineScaleSetDataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (storageProfile.dataDisks() != null && storageProfile.dataDisks().size() == 0) { if (vmss.isInCreateMode()) { storageProfile.withDataDisks(null); } } this.clear(); } private void clear() { implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); } private boolean isPending() { return implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void 
setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { if (this.vmss.innerModel() == null || this.vmss.innerModel().virtualMachineProfile() == null) { return; } VirtualMachineScaleSetStorageProfile storageProfile = this.vmss.innerModel().virtualMachineProfile().storageProfile(); List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile.dataDisks(); for (VirtualMachineScaleSetDataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new VirtualMachineScaleSetManagedDiskParameters()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { if (this.vmss.innerModel() == null || this.vmss.innerModel().virtualMachineProfile() == null) { return; } VirtualMachineScaleSetStorageProfile storageProfile = this.vmss.innerModel().virtualMachineProfile().storageProfile(); List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile.dataDisks(); for (VirtualMachineScaleSetDataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { if (this.vmss.innerModel() == null || this.vmss.innerModel().virtualMachineProfile() == null) { return; } VirtualMachineScaleSetStorageProfile storageProfile = this.vmss.innerModel().virtualMachineProfile().storageProfile(); List<VirtualMachineScaleSetDataDisk> dataDisks = storageProfile.dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (VirtualMachineScaleSetDataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } } /** Class to manage VMSS boot diagnostics settings. 
*/ private class BootDiagnosticsHandler { private final VirtualMachineScaleSetImpl vmssImpl; private String creatableDiagnosticsStorageAccountKey; private String creatableStorageAccountKey; private StorageAccount existingStorageAccountToAssociate; private boolean useManagedStorageAccount = false; BootDiagnosticsHandler(VirtualMachineScaleSetImpl vmssImpl) { this.vmssImpl = vmssImpl; if (isBootDiagnosticsEnabled() && this.vmssInner() != null && this.vmssInner().virtualMachineProfile() != null && this.vmssInner().virtualMachineProfile() .diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } } public boolean isBootDiagnosticsEnabled() { if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return false; } DiagnosticsProfile diagnosticsProfile = this.vmssInner().virtualMachineProfile().diagnosticsProfile(); if (diagnosticsProfile != null && diagnosticsProfile.bootDiagnostics() != null && diagnosticsProfile.bootDiagnostics().enabled() != null) { return diagnosticsProfile.bootDiagnostics().enabled(); } return false; } public String bootDiagnosticsStorageUri() { if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return null; } DiagnosticsProfile diagnosticsProfile = this.vmssInner().virtualMachineProfile().diagnosticsProfile(); if (diagnosticsProfile != null && diagnosticsProfile.bootDiagnostics() != null) { return diagnosticsProfile.bootDiagnostics().storageUri(); } return null; } BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; } BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmssImpl.addDependency(creatable); return this; } BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return this; } this.enableDisable(true); this.useManagedStorageAccount = false; this .vmssInner() .virtualMachineProfile() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccountBlobEndpointUri); return this; } BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); } BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; } void prepare() { if (useManagedStorageAccount) { return; } this.creatableStorageAccountKey = null; this.existingStorageAccountToAssociate = null; if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return; } DiagnosticsProfile diagnosticsProfile = this.vmssInner().virtualMachineProfile().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } if (this.creatableDiagnosticsStorageAccountKey != null) { return; } if (!this.vmssImpl.creatableStorageAccountKeys.isEmpty()) { this.creatableStorageAccountKey = this.vmssImpl.creatableStorageAccountKeys.get(0); return; } if (!this.vmssImpl.existingStorageAccountsToAssociate.isEmpty()) { 
this.existingStorageAccountToAssociate = this.vmssImpl.existingStorageAccountsToAssociate.get(0); return; } String accountName = this.vmssImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmssImpl.creatableGroup != null) { storageAccountCreatable = this .vmssImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmssImpl.regionName()) .withNewResourceGroup(this.vmssImpl.creatableGroup); } else { storageAccountCreatable = this .vmssImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmssImpl.regionName()) .withExistingResourceGroup(this.vmssImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmssImpl.addDependency(storageAccountCreatable); } void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return; } DiagnosticsProfile diagnosticsProfile = this.vmssInner().virtualMachineProfile().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmssImpl.taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.creatableStorageAccountKey != null) { storageAccount = this.vmssImpl.taskResult(this.creatableStorageAccountKey); } else if (this.existingStorageAccountToAssociate != null) { storageAccount = this.existingStorageAccountToAssociate; } if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmssInner() .virtualMachineProfile() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); } private VirtualMachineScaleSetInner vmssInner() { return this.vmssImpl.innerModel(); } private void enableDisable(boolean enable) { if (this.vmssInner() == null || this.vmssInner().virtualMachineProfile() == null) { return; } if (this.vmssInner().virtualMachineProfile().diagnosticsProfile() == null) { this.vmssInner().virtualMachineProfile().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmssInner().virtualMachineProfile().diagnosticsProfile().bootDiagnostics() == null) { this .vmssInner() .virtualMachineProfile() .diagnosticsProfile() .withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmssInner().virtualMachineProfile().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmssInner().virtualMachineProfile().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmssInner().virtualMachineProfile().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
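Aside on the scale-set code above: the ManagedDataDiskCollection assigns LUNs to implicitly added data disks by collecting every LUN already in use and then handing out the lowest free value. The following is a minimal standalone sketch of that selection step only; the class and method names here are illustrative and are not part of the SDK.

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative rendering of the "next free LUN" logic used by
    // ManagedDataDiskCollection.setDataDisksDefaults() in the code above.
    public class NextLunSketch {

        // Returns the lowest LUN not yet in use and reserves it for the caller.
        static int nextLun(List<Integer> usedLuns) {
            int lun = 0;
            while (usedLuns.contains(lun)) {
                lun++;
            }
            usedLuns.add(lun);
            return lun;
        }

        public static void main(String[] args) {
            List<Integer> used = new ArrayList<>(List.of(0, 1, 3)); // LUNs already taken by existing disks
            System.out.println(nextLun(used)); // 2 -- fills the gap first
            System.out.println(nextLun(used)); // 4 -- then continues past the highest used LUN
        }
    }

Reserving the LUN inside the helper mirrors the original's usedLuns.add(lun), so repeated calls never hand out the same value twice.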
We should have a test for the implementation package to validate that it's a no-op.
public void publicClassImplementsPublicApi() throws Exception { File file = TestUtils.createCheckFile("publicClassImplementsPublicApi", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public class MyClass implements PublicClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); }
"package com.azure;",
public void publicClassImplementsPublicApi() throws Exception { File file = TestUtils.createCheckFile("publicClassImplementsPublicApi", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public class MyClass implements PublicClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); }
class NoImplInPublicApiTest extends AbstractModuleTestSupport { private Checker checker; @Before public void prepare() throws Exception { checker = createChecker(createModuleConfig(NoImplInPublicAPI.class)); } @After public void cleanup() { checker.destroy(); } @Override protected String getPackageLocation() { return "com/azure/tools/checkstyle/checks/NoImplInPublicApiCheck"; } @Test public void staticInitializerDoesNotCountAsApi() throws Exception { String[] expected = new String[0]; verify(checker, getPath("StaticInitializer.java"), expected); } @Test public void gettersNeedToCheckForImplementation() throws Exception { String[] expected = { expectedErrorMessage(42, 12, String.format(RETURN_TYPE_ERROR, "AnImplementationClass")), expectedErrorMessage(46, 15, String.format(RETURN_TYPE_ERROR, "AnImplementationClass")), expectedErrorMessage(59, 36, String.format(RETURN_TYPE_ERROR, "com.azure.implementation.AnImplementationClass")), expectedErrorMessage(63, 39, String.format(RETURN_TYPE_ERROR, "com.azure.implementation.AnImplementationClass")) }; verify(checker, getPath("Getters.java"), expected); } @Test @Test public void publicClassImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("publicClassImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public class MyClass implements ImplementationClass {", "}" )); String[] expected = { expectedErrorMessage(3, 33, String.format(IMPLEMENTS_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicClassImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("nonPublicClassImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "class MyClass implements ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassExtendsPublicApi() throws Exception { File file = TestUtils.createCheckFile("publicClassExtendsPublicApi", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public class MyClass extends PublicClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassExtendsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("publicClassExtendsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public class MyClass extends ImplementationClass {", "}" )); String[] expected = { expectedErrorMessage(3, 30, String.format(EXTENDS_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicClassExtendsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("nonPublicClassExtendsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "class MyClass extends ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceImplementsPublicApi() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceImplementsPublicApi", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public interface MyInterface extends PublicClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void 
publicInterfaceImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public interface MyInterface extends ImplementationClass {", "}" )); String[] expected = { expectedErrorMessage(3, 38, String.format(EXTENDS_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicInterfaceImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("nonPublicInterfaceImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "interface MyInterface extends ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicEnumImplementsPublicApi() throws Exception { File file = TestUtils.createCheckFile("publicEnumImplementsPublicApi", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public enum MyEnum implements PublicClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicEnumImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("publicEnumImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public enum MyEnum implements ImplementationClass {", "}" )); String[] expected = { expectedErrorMessage(3, 31, String.format(IMPLEMENTS_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicEnumImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("nonPublicEnumImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "enum MyEnum implements ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassUsesPublicApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicClassUsesPublicApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public class MyClass<PublicClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassUsesImplementationApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicClassUsesImplementationApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public class MyClass<ImplementationClass> {", "}" )); String[] expected = { expectedErrorMessage(3, 22, String.format(TYPE_PARAM_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicClassUsesImplementationApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("nonPublicClassUsesImplementationApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "class MyClass<ImplementationClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassUsesPublicApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicClassUsesPublicApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public class MyClass<A extends PublicClass> 
{", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassUsesImplementationApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicClassUsesImplementationApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public class MyClass<A extends ImplementationClass> {", "}" )); String[] expected = { expectedErrorMessage(3, 22, String.format(TYPE_PARAM_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicClassUsesImplementationApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("nonPublicClassUsesImplementationApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "class MyClass<A extends ImplementationClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceUsesPublicApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceUsesPublicApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public interface MyInterface<PublicClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceUsesImplementationApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceUsesImplementationApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public interface MyInterface<ImplementationClass> {", "}" )); String[] expected = { expectedErrorMessage(3, 30, String.format(TYPE_PARAM_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicInterfaceUsesImplementationApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("nonPublicInterfaceUsesImplementationApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "interface MyInterface<ImplementationClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceUsesPublicApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceUsesPublicApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public interface MyInterface<A extends PublicClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceUsesImplementationApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceUsesImplementationApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public interface MyInterface<A extends ImplementationClass> {", "}" )); String[] expected = { expectedErrorMessage(3, 30, String.format(TYPE_PARAM_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicInterfaceUsesImplementationApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("nonPublicInterfaceUsesImplementationApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "interface MyInterface<A extends ImplementationClass> {", "}" )); verify(checker, new File[]{file}, 
file.getAbsolutePath()); } private String expectedErrorMessage(int line, int column, String error) { return String.format("%d:%d: %s", line, column, error); } }
class NoImplInPublicApiTest extends AbstractModuleTestSupport { private Checker checker; @Before public void prepare() throws Exception { checker = createChecker(createModuleConfig(NoImplInPublicAPI.class)); } @After public void cleanup() { checker.destroy(); } @Override protected String getPackageLocation() { return "com/azure/tools/checkstyle/checks/NoImplInPublicApiCheck"; } @Test public void staticInitializerDoesNotCountAsApi() throws Exception { String[] expected = new String[0]; verify(checker, getPath("StaticInitializer.java"), expected); } @Test public void gettersNeedToCheckForImplementation() throws Exception { String[] expected = { expectedErrorMessage(42, 12, String.format(RETURN_TYPE_ERROR, "AnImplementationClass")), expectedErrorMessage(46, 15, String.format(RETURN_TYPE_ERROR, "AnImplementationClass")), expectedErrorMessage(59, 36, String.format(RETURN_TYPE_ERROR, "com.azure.implementation.AnImplementationClass")), expectedErrorMessage(63, 39, String.format(RETURN_TYPE_ERROR, "com.azure.implementation.AnImplementationClass")) }; verify(checker, getPath("Getters.java"), expected); } @Test @Test public void publicClassImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("publicClassImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public class MyClass implements ImplementationClass {", "}" )); String[] expected = { expectedErrorMessage(3, 33, String.format(IMPLEMENTS_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicClassImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("nonPublicClassImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "class MyClass implements ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassExtendsPublicApi() throws Exception { File file = TestUtils.createCheckFile("publicClassExtendsPublicApi", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public class MyClass extends PublicClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassExtendsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("publicClassExtendsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public class MyClass extends ImplementationClass {", "}" )); String[] expected = { expectedErrorMessage(3, 30, String.format(EXTENDS_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicClassExtendsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("nonPublicClassExtendsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "class MyClass extends ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceImplementsPublicApi() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceImplementsPublicApi", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public interface MyInterface extends PublicClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void 
publicInterfaceImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public interface MyInterface extends ImplementationClass {", "}" )); String[] expected = { expectedErrorMessage(3, 38, String.format(EXTENDS_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicInterfaceImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("nonPublicInterfaceImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "interface MyInterface extends ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicEnumImplementsPublicApi() throws Exception { File file = TestUtils.createCheckFile("publicEnumImplementsPublicApi", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public enum MyEnum implements PublicClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicEnumImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("publicEnumImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public enum MyEnum implements ImplementationClass {", "}" )); String[] expected = { expectedErrorMessage(3, 31, String.format(IMPLEMENTS_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicEnumImplementsImplementationApi() throws Exception { File file = TestUtils.createCheckFile("nonPublicEnumImplementsImplementationApi", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "enum MyEnum implements ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassUsesPublicApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicClassUsesPublicApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public class MyClass<PublicClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassUsesImplementationApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicClassUsesImplementationApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public class MyClass<ImplementationClass> {", "}" )); String[] expected = { expectedErrorMessage(3, 22, String.format(TYPE_PARAM_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicClassUsesImplementationApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("nonPublicClassUsesImplementationApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "class MyClass<ImplementationClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassUsesPublicApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicClassUsesPublicApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public class MyClass<A extends PublicClass> 
{", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicClassUsesImplementationApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicClassUsesImplementationApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public class MyClass<A extends ImplementationClass> {", "}" )); String[] expected = { expectedErrorMessage(3, 22, String.format(TYPE_PARAM_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicClassUsesImplementationApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("nonPublicClassUsesImplementationApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "class MyClass<A extends ImplementationClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceUsesPublicApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceUsesPublicApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public interface MyInterface<PublicClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceUsesImplementationApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceUsesImplementationApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public interface MyInterface<ImplementationClass> {", "}" )); String[] expected = { expectedErrorMessage(3, 30, String.format(TYPE_PARAM_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicInterfaceUsesImplementationApiTypeParam() throws Exception { File file = TestUtils.createCheckFile("nonPublicInterfaceUsesImplementationApiTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "interface MyInterface<ImplementationClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceUsesPublicApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceUsesPublicApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.PublicClass;", "public interface MyInterface<A extends PublicClass> {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } @Test public void publicInterfaceUsesImplementationApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("publicInterfaceUsesImplementationApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "public interface MyInterface<A extends ImplementationClass> {", "}" )); String[] expected = { expectedErrorMessage(3, 30, String.format(TYPE_PARAM_TYPE_ERROR, "ImplementationClass")) }; verify(checker, new File[]{file}, file.getAbsolutePath(), expected); } @Test public void nonPublicInterfaceUsesImplementationApiUpperBoundTypeParam() throws Exception { File file = TestUtils.createCheckFile("nonPublicInterfaceUsesImplementationApiUpperBoundTypeParam", Arrays.asList( "package com.azure;", "import com.azure.implementation.ImplementationClass;", "interface MyInterface<A extends ImplementationClass> {", "}" )); verify(checker, new File[]{file}, 
file.getAbsolutePath()); } @Test public void implementationPackageIsANoOp() throws Exception { File file = TestUtils.createCheckFile("implementationPackageIsANoOp", Arrays.asList( "package com.azure.implementation;", "import com.azure.implementation.ImplementationClass;", "public class MyClass extends ImplementationClass {", "}" )); verify(checker, new File[]{file}, file.getAbsolutePath()); } private String expectedErrorMessage(int line, int column, String error) { return String.format("%d:%d: %s", line, column, error); } }
Why don't we use beanFactory.containsBean here?
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AbstractAzureHttpClientBuilderFactory) { try { HttpPipelinePolicy policy = beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME, HttpPipelinePolicy.class); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } catch (BeansException exception) { LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass()); } } return bean; }
} catch (BeansException exception) {
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes", "unchecked" }) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
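To illustrate the reviewer's point above: `containsBean` is an explicit existence check, so the lookup does not rely on catching `BeansException` for flow control. A minimal, self-contained sketch (assuming `spring-beans` on the classpath; the bean name and payload are made up for illustration):

```java
import org.springframework.beans.factory.support.DefaultListableBeanFactory;

public class ContainsBeanSketch {
    public static void main(String[] args) {
        DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory();
        String policyBeanName = "sleuthHttpPolicy"; // hypothetical bean name for this sketch
        // beanFactory.registerSingleton(policyBeanName, new Object()); // uncomment to simulate the bean existing

        if (beanFactory.containsBean(policyBeanName)) {
            // Safe to resolve: getBean will not throw NoSuchBeanDefinitionException here.
            Object policy = beanFactory.getBean(policyBeanName);
            System.out.println("Policy bean found: " + policy);
        } else {
            // No exception-driven control flow; the absent-bean case is an ordinary branch.
            System.out.println("No Sleuth policy bean registered; skipping pipeline customization.");
        }
    }
}
```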
Can we change this to: ```java if (!(bean instanceof xxx)) { return bean; } ```
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AbstractAzureHttpClientBuilderFactory && beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
if (bean instanceof AbstractAzureHttpClientBuilderFactory
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes", "unchecked" }) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
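As a side note on the suggested guard clause: the parentheses matter, because `!` binds tighter than `instanceof`, so `!bean instanceof Foo` does not compile. A small self-contained sketch of the early-return shape (the types here are stand-ins, not the Azure classes):

```java
public class GuardClauseSketch {
    static Object postProcess(Object bean) {
        // if (!bean instanceof CharSequence) ...   // would not compile: '!' applies to 'bean' itself
        if (!(bean instanceof CharSequence)) {      // negate the whole instanceof check
            return bean;                            // early return keeps the main logic un-nested
        }
        System.out.println("Handling CharSequence bean of length " + ((CharSequence) bean).length());
        return bean;
    }

    public static void main(String[] args) {
        postProcess(42);       // unrelated bean passes through untouched
        postProcess("hello");  // matching bean is handled
    }
}
```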
Actually, have you checked how many times this warning log would be printed if there is no Sleuth pipeline bean in the beanFactory?
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } else { LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass()); } return bean; }
LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass());
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
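On the question of how often the warning would fire: a `BeanPostProcessor` runs once for every bean the context initializes, so a warn in the builder-factory branch is emitted once per builder-factory bean, not once overall. A toy simulation (bean names invented) that makes the count visible:

```java
import java.util.List;

public class WarnFrequencySketch {
    public static void main(String[] args) {
        // Stand-ins for beans that a real application context would pass through the post-processor.
        List<String> beanNames = List.of(
            "blobClientBuilderFactory", "queueClientBuilderFactory",
            "keyVaultClientBuilderFactory", "someUnrelatedBean");
        boolean sleuthPolicyPresent = false; // the scenario the reviewer is asking about
        int warnings = 0;

        for (String beanName : beanNames) {                 // postProcessAfterInitialization: once per bean
            boolean isBuilderFactory = beanName.endsWith("BuilderFactory");
            if (isBuilderFactory && !sleuthPolicyPresent) {
                warnings++;                                  // the warn branch fires for every builder factory
            }
        }
        System.out.println("Warning would be logged " + warnings + " times"); // 3 here, not 1
    }
}
```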
Consider catching and handling the relevant storage error code; there is a sample here: https://github.com/Azure/azure-sdk-for-java/blob/aff24131127ed85da1e76ed7cb4cccd9a25279c0/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy#L128 .
public Stream<StorageItem> listItems(String itemPrefix) { ListBlobsOptions options = new ListBlobsOptions(); options.setPrefix(itemPrefix); options.setDetails(RETRIEVE_NOTHING_DETAILS); BlobContainerClient containerClient = getBlobServiceClient().getBlobContainerClient(name); if (containerClient.exists()) { return containerClient.listBlobs(options, null) .stream() .map(blob -> new StorageItem(name, blob.getName(), getStorageType())); } else { return Stream.empty(); } }
if (containerClient.exists()) {
public Stream<StorageItem> listItems(String itemPrefix) { ListBlobsOptions options = new ListBlobsOptions(); options.setPrefix(itemPrefix); options.setDetails(RETRIEVE_NOTHING_DETAILS); BlobContainerClient containerClient = getBlobServiceClient().getBlobContainerClient(name); if (containerClient.exists()) { return containerClient.listBlobs(options, null) .stream() .map(blob -> new StorageItem(name, blob.getName(), getStorageType())); } else { return Stream.empty(); } }
class StorageBlobContainerClient implements StorageContainerClient { private final String name; StorageBlobContainerClient(String name) { this.name = name; } @Override public String getName() { return name; } @Override }
class StorageBlobContainerClient implements StorageContainerClient { private final String name; StorageBlobContainerClient(String name) { this.name = name; } @Override public String getName() { return name; } @Override }
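A rough sketch of the error-code-based handling the reviewer links to, assuming `azure-storage-blob` on the classpath (an illustration, not the project's actual fix). Because paging is lazy, the listing is materialized inside the `try` so a missing container surfaces as a `BlobStorageException` with `CONTAINER_NOT_FOUND` instead of requiring a separate `exists()` round trip:

```java
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.models.BlobErrorCode;
import com.azure.storage.blob.models.BlobItem;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.models.ListBlobsOptions;

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

final class ListItemsSketch {
    static Stream<String> listBlobNames(BlobContainerClient containerClient, String prefix) {
        ListBlobsOptions options = new ListBlobsOptions().setPrefix(prefix);
        try {
            // Materialize while still inside the try block: the 404 is raised during iteration.
            List<String> names = containerClient.listBlobs(options, null).stream()
                .map(BlobItem::getName)
                .collect(Collectors.toList());
            return names.stream();
        } catch (BlobStorageException e) {
            if (BlobErrorCode.CONTAINER_NOT_FOUND.equals(e.getErrorCode())) {
                return Stream.empty(); // a missing container is treated as "no items"
            }
            throw e;                   // any other storage error still propagates
        }
    }
}
```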
Do we need to mention "Azure Text Analytics"? Also, "model" should not be capitalized. One suggestion is: ```suggestion System.out.printf("Results of entities recognition has been computed with model version: %s%n", ```
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n",
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of entities recognition has been computed with model version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
Is it true that if the user doesn't specify a model version, the backend would still set it to `latest`?
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of entities recognition has been computed with model version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
.setModelVersion("latest");
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of entities recognition has been computed with model version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
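One way to answer the question above empirically is to omit `setModelVersion(...)` entirely and print the version the service reports back in the result collection. A sketch reusing the same client/documents shape as the sample (the `{key}`/`{endpoint}` placeholders and the import locations are assumed from the surrounding code):

```java
import com.azure.ai.textanalytics.TextAnalyticsClient;
import com.azure.ai.textanalytics.TextAnalyticsClientBuilder;
import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions;
import com.azure.ai.textanalytics.models.TextDocumentInput;
import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.util.Context;

import java.util.Arrays;
import java.util.List;

public class DefaultModelVersionCheck {
    public static void main(String[] args) {
        TextAnalyticsClient client = new TextAnalyticsClientBuilder()
            .credential(new AzureKeyCredential("{key}"))   // placeholder, as in the sample above
            .endpoint("{endpoint}")
            .buildClient();

        List<TextDocumentInput> documents = Arrays.asList(
            new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"));

        // No setModelVersion(...) here: whatever version comes back is the service-side default.
        TextAnalyticsRequestOptions defaultOptions = new TextAnalyticsRequestOptions();
        RecognizeEntitiesResultCollection results = client
            .recognizeEntitiesBatchWithResponse(documents, defaultOptions, Context.NONE)
            .getValue();

        System.out.printf("Model version chosen by the service: %s%n", results.getModelVersion());
    }
}
```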
Logic looks good to me, though I'd recommend using `Modifier.PRIVATE` instead of `2` as it will more clearly show the transformation being applied. ```suggestion models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(Modifier.PRIVATE); models.getClass("CommunicationIceServer").getMethod("setUsername").setModifier(Modifier.PRIVATE); models.getClass("CommunicationIceServer").getMethod("setRouteType").setModifier(Modifier.PRIVATE); models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(Modifier.PRIVATE); ```
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setUsername").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setRouteType").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(2); }
models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(2);
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); String modelToModify = "CommunicationIceServer"; models.getClass(modelToModify).getMethod("setUrls").setModifier(0); models.getClass(modelToModify).getMethod("setUsername").setModifier(0); models.getClass(modelToModify).getMethod("setRouteType").setModifier(0); models.getClass(modelToModify).getMethod("setCredential").setModifier(0); }
class CommunicationRelayCustomization extends Customization { @Override }
class CommunicationRelayCustomization extends Customization { @Override }
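For context on why `2` reads as "private": the value matches the `java.lang.reflect.Modifier` bit flags, which is presumably what the customization's `setModifier(int)` expects given the reviewer's suggestion; `0` (used in the final version) clears all modifier bits, i.e. package-private. A tiny sketch that just decodes the flags:

```java
import java.lang.reflect.Modifier;

public class ModifierFlagsSketch {
    public static void main(String[] args) {
        System.out.println("PUBLIC    = " + Modifier.PUBLIC);    // 1
        System.out.println("PRIVATE   = " + Modifier.PRIVATE);   // 2
        System.out.println("PROTECTED = " + Modifier.PROTECTED); // 4
        System.out.println("2 decodes to: " + Modifier.toString(2));          // "private"
        System.out.println("0 decodes to: \"" + Modifier.toString(0) + "\""); // "" -> package-private
    }
}
```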
Use `Modifier.PRIVATE` instead of 2 to make it more readable.
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setUsername").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setRouteType").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(2); }
models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(2);
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); String modelToModify = "CommunicationIceServer"; models.getClass(modelToModify).getMethod("setUrls").setModifier(0); models.getClass(modelToModify).getMethod("setUsername").setModifier(0); models.getClass(modelToModify).getMethod("setRouteType").setModifier(0); models.getClass(modelToModify).getMethod("setCredential").setModifier(0); }
class CommunicationRelayCustomization extends Customization { @Override }
class CommunicationRelayCustomization extends Customization { @Override }
Do we have any unit tests for BulkExecutor in Java? Could a unit test that adds more than 256 items, each targeting a different PKRangeId, trigger the original bug?
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) 
.doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
.groupBy(Pair::getKey, Pair::getValue)
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int 
totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
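The constructor in the context above schedules onFlush on a single-threaded daemon executor so that partially filled micro batches are still emitted when no new operations arrive; onFlush pushes a FlushBuffersItemOperation sentinel into every group sink. A minimal sketch of that scheduling pattern follows, assuming only Reactor's FluxSink; the class name and sentinel type are hypothetical.

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    import reactor.core.publisher.FluxSink;

    // Illustrative: periodically push a "flush" sentinel into every registered group sink.
    final class PeriodicFlusher<T> {
        private final List<FluxSink<T>> groupSinks = new CopyOnWriteArrayList<>();
        private final T flushSentinel;
        private final ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor(r -> {
                Thread t = new Thread(r, "periodic-flusher");
                t.setDaemon(true); // do not keep the JVM alive just for flushing
                return t;
            });

        PeriodicFlusher(T flushSentinel, long intervalMs) {
            this.flushSentinel = flushSentinel;
            scheduler.scheduleWithFixedDelay(this::flushAll, intervalMs, intervalMs, TimeUnit.MILLISECONDS);
        }

        void register(FluxSink<T> sink) {
            groupSinks.add(sink);
        }

        private void flushAll() {
            try {
                // Each group interprets the sentinel as "flush whatever is currently buffered".
                groupSinks.forEach(sink -> sink.next(flushSentinel));
            } catch (Throwable t) {
                // Never let an exception terminate the scheduled task.
            }
        }

        void shutdown() {
            scheduler.shutdown();
        }
    }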
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
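The two context blocks differ only in how SerializedEmitFailureHandler logs: the later version logs FAIL_NON_SERIALIZED at debug and every other emit failure at error. In both versions the handler retries solely on FAIL_NON_SERIALIZED, which is the result Reactor reports when concurrent callers collide on a serialized sink. A stripped-down version of that handler, using Reactor's Sinks API and an illustrative class name, looks like this:

    import reactor.core.publisher.SignalType;
    import reactor.core.publisher.Sinks;

    // Illustrative: retry only when concurrent emitters collide; surface everything else.
    final class RetryOnNonSerializedHandler implements Sinks.EmitFailureHandler {
        @Override
        public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) {
            // Returning true tells emitNext(...) to retry the emission.
            return emitResult == Sinks.EmitResult.FAIL_NON_SERIALIZED;
        }
    }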
typo: hung -> hang
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) 
.doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int 
totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
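The "default concurrency (256)" remark refers to the rule at the top of execute() above: a caller-provided maxConcurrentCosmosPartitions is used but floored at 256; when it is absent, the value is derived from the container's feed ranges as ranges * 2, again floored at 256. A small helper capturing just that rule (the class and method names are hypothetical) might look like:

    import java.util.List;

    // Illustrative: mirrors the default-concurrency rule visible in execute() above.
    final class ConcurrencyDefaults {
        private static final int MIN_CONCURRENT_PARTITIONS = 256;

        static int resolveMaxConcurrentPartitions(Integer configured, List<?> feedRanges) {
            if (configured != null) {
                return Math.max(MIN_CONCURRENT_PARTITIONS, configured);
            }
            return Math.max(MIN_CONCURRENT_PARTITIONS, feedRanges.size() * 2);
        }
    }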
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
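The SerializedEmitFailureHandler above returns true only for FAIL_NON_SERIALIZED, which asks Reactor to retry the emission when several threads race on the same Sinks.Many. A minimal, self-contained sketch of that retry contract follows; the class name, thread setup and item type are illustrative only and not part of the SDK.

import reactor.core.publisher.SignalType;
import reactor.core.publisher.Sinks;

// Sketch: a unicast sink shared by two producer threads. Returning true from
// onEmitFailure makes emitNext(...) retry instead of failing when the two
// threads collide with FAIL_NON_SERIALIZED.
public final class SerializedEmitSketch {

    private static final Sinks.EmitFailureHandler RETRY_ON_NON_SERIALIZED =
        (SignalType signalType, Sinks.EmitResult emitResult) ->
            emitResult == Sinks.EmitResult.FAIL_NON_SERIALIZED; // retry only this case

    public static void main(String[] args) throws InterruptedException {
        Sinks.Many<Integer> sink = Sinks.many().unicast().onBackpressureBuffer();
        sink.asFlux().subscribe(i -> System.out.println("received " + i));

        Runnable producer = () -> {
            for (int i = 0; i < 100; i++) {
                // emitNext keeps invoking the handler until the emission succeeds
                // or the handler returns false.
                sink.emitNext(i, RETRY_ON_NON_SERIALIZED);
            }
        };

        Thread t1 = new Thread(producer);
        Thread t2 = new Thread(producer);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        sink.emitComplete(RETRY_ON_NON_SERIALIZED);
    }
}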
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
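enqueueForRetry above re-emits the operation into the group sink immediately when no back-off is requested, and otherwise defers the re-emission with Mono.delay so no thread is blocked while waiting. A hedged, standalone sketch of that shape under simplified types; the item type and helper names are invented for illustration.

import java.time.Duration;

import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;

// Sketch only: re-enqueue a failed item into a group sink, optionally after a back-off.
public final class RetryEnqueueSketch {

    static <T> Mono<Void> enqueueForRetry(Duration backOff, FluxSink<T> groupSink, T item) {
        if (backOff == null || backOff.isZero()) {
            groupSink.next(item);          // retry immediately
            return Mono.empty();
        }
        return Mono.delay(backOff)         // defer the retry without blocking a thread
                   .doOnNext(tick -> groupSink.next(item))
                   .then();
    }

    public static void main(String[] args) {
        Flux<String> retries = Flux.create(sink -> {
            enqueueForRetry(Duration.ZERO, sink, "immediate").subscribe();
            enqueueForRetry(Duration.ofMillis(200), sink, "delayed").subscribe();
        });

        // Prints "immediate" right away and "delayed" roughly 200 ms later.
        retries.take(2).doOnNext(System.out::println).blockLast();
    }
}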
typo: "can bet sed" -> "can be used"
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) 
.doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
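In execute() above, the sinks are closed only once the input flux has completed and the count of outstanding operations has drained to zero; both the per-response doOnNext path and the doOnComplete path re-check the totalCount/mainSourceCompleted pair because either event can arrive last. A simplified sketch of that completion handshake, using plain counters instead of the reactive pipeline (all names invented for illustration):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch: signal "all work done" only when (a) the input source has completed and
// (b) every submitted item has been answered. Either event can happen last, so the
// same check runs on both paths.
public final class CompletionHandshakeSketch {

    private final AtomicBoolean sourceCompleted = new AtomicBoolean(false);
    private final AtomicInteger inFlight = new AtomicInteger(0);

    void onItemSubmitted() {
        inFlight.incrementAndGet();
    }

    void onSourceCompleted() {
        sourceCompleted.set(true);
        if (inFlight.get() == 0) {
            closeSinks();                    // nothing submitted, or everything already answered
        }
    }

    void onItemAnswered() {
        if (inFlight.decrementAndGet() == 0 && sourceCompleted.get()) {
            closeSinks();                    // last response arrived after the source finished
        }
    }

    private void closeSinks() {
        System.out.println("all sinks closed");
    }

    public static void main(String[] args) {
        CompletionHandshakeSketch s = new CompletionHandshakeSketch();
        s.onItemSubmitted();
        s.onItemSubmitted();
        s.onItemAnswered();
        s.onSourceCompleted();               // not yet: one item still in flight
        s.onItemAnswered();                  // now: prints "all sinks closed"
    }
}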
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int 
totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
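On the default concurrency noted above: when no explicit maximum is configured, the code derives the partition-level concurrency from the container's feed ranges (two per range) and floors the result at 256 in both branches. A small sketch of that selection; the method name below is illustrative, not the SDK API.

import reactor.core.publisher.Mono;

// Sketch of the default-concurrency choice: an explicit setting wins, otherwise
// use twice the number of feed ranges, and in both cases floor the result at 256.
public final class DefaultConcurrencySketch {

    static Mono<Integer> maxConcurrentPartitions(Integer configured, Mono<Integer> feedRangeCount) {
        if (configured != null) {
            return Mono.just(Math.max(256, configured));
        }
        return feedRangeCount.map(ranges -> Math.max(256, ranges * 2));
    }

    public static void main(String[] args) {
        System.out.println(maxConcurrentPartitions(512, Mono.just(10)).block());   // 512
        System.out.println(maxConcurrentPartitions(8, Mono.just(10)).block());     // 256 (floor applies)
        System.out.println(maxConcurrentPartitions(null, Mono.just(200)).block()); // 400 (2 x feed ranges)
    }
}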
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
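The bufferUntil predicate in this class flushes a micro batch as soon as any one of three limits is reached: the target batch size, the maximum batch age, or the maximum serialized payload size. A standalone sketch of that decision follows, with the three thresholds hard-coded to illustrative values rather than the SDK constants.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

// Sketch: decide whether the current micro batch should be flushed. Any of the
// three limits (count, age, serialized bytes) triggers a flush and resets state.
public final class MicroBatchFlushSketch {

    private static final long MAX_BATCH_SIZE = 100;        // illustrative values only
    private static final long MAX_AGE_MS = 1_000;
    private static final int MAX_PAYLOAD_BYTES = 220_000;

    private final AtomicLong firstRecordTimestamp = new AtomicLong(-1);
    private final AtomicLong batchSize = new AtomicLong(0);
    private final AtomicInteger payloadBytes = new AtomicInteger(0);

    boolean shouldFlush(long nowMs, int itemSerializedLength) {
        firstRecordTimestamp.compareAndSet(-1, nowMs);
        long age = nowMs - firstRecordTimestamp.get();
        long size = batchSize.incrementAndGet();
        int bytes = payloadBytes.addAndGet(itemSerializedLength);

        if (size >= MAX_BATCH_SIZE || age >= MAX_AGE_MS || bytes >= MAX_PAYLOAD_BYTES) {
            // reset for the next micro batch
            firstRecordTimestamp.set(-1);
            batchSize.set(0);
            payloadBytes.set(0);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        MicroBatchFlushSketch sketch = new MicroBatchFlushSketch();
        long t0 = System.currentTimeMillis();
        for (int i = 1; i <= 100; i++) {
            if (sketch.shouldFlush(t0, 500)) {
                System.out.println("flush triggered at item " + i); // item 100 (count limit)
            }
        }
    }
}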
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
typo: teh -> the
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) 
.doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int 
totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
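For readers unfamiliar with the Reactor sinks contract used by the SerializedEmitFailureHandler above: returning true from onEmitFailure tells emitNext to retry the emission, which is how concurrent FAIL_NON_SERIALIZED results are absorbed instead of surfacing as errors. The following standalone sketch illustrates that contract with plain reactor-core types; the class name and the toy producers are illustrative only and are not part of the SDK.

import reactor.core.publisher.SignalType;
import reactor.core.publisher.Sinks;

// Minimal sketch of the emit-retry contract, assuming reactor-core is on the classpath.
public final class EmitRetrySketch {
    public static void main(String[] args) throws InterruptedException {
        Sinks.Many<Integer> sink = Sinks.many().unicast().onBackpressureBuffer();
        sink.asFlux().subscribe(i -> System.out.println("received " + i));

        // Returning true asks emitNext to retry; everything else fails fast.
        Sinks.EmitFailureHandler retryNonSerialized =
            (SignalType signalType, Sinks.EmitResult result) ->
                result == Sinks.EmitResult.FAIL_NON_SERIALIZED;

        // Emit from two threads; contended emissions are retried, not dropped.
        Runnable producer = () -> {
            for (int i = 0; i < 5; i++) {
                sink.emitNext(i, retryNonSerialized);
            }
        };
        Thread t1 = new Thread(producer);
        Thread t2 = new Thread(producer);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        sink.tryEmitComplete();
    }
}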
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
In theory the number of physical partitions can keep growing; I wonder about the perf of a container with 1000 partitions or more. Long term, we should consider removing the groupBy.
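A very rough, hypothetical sketch of the direction hinted at in the comment above (not the SDK's implementation): instead of Flux.groupBy, operations could be routed to one self-managed sink per resolved partition key range, keeping the number and lifetime of groups under the executor's own control as physical partitions grow. All names here (PerPartitionRoutingSketch, the toy "pkRangeId:operation" strings, buffer(2) as a stand-in for the micro-batch thresholds) are placeholders.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Sinks;

public final class PerPartitionRoutingSketch {
    public static void main(String[] args) {
        // Toy input: "pkRangeId:operation" pairs standing in for CosmosItemOperations.
        Flux<String> input = Flux.just("0:create doc-1", "1:create doc-2", "0:replace doc-3");

        Map<String, Sinks.Many<String>> sinksByPkRange = new ConcurrentHashMap<>();

        input.doOnNext(entry -> {
            String pkRangeId = entry.substring(0, entry.indexOf(':'));
            String operation = entry.substring(entry.indexOf(':') + 1);

            // Lazily create one buffered sink per partition key range and subscribe
            // its flux exactly once; buffer(2) stands in for the batching thresholds.
            Sinks.Many<String> sink = sinksByPkRange.computeIfAbsent(pkRangeId, id -> {
                Sinks.Many<String> s = Sinks.many().unicast().onBackpressureBuffer();
                s.asFlux()
                 .buffer(2)
                 .subscribe(batch -> System.out.println("PKRange " + id + " -> " + batch));
                return s;
            });
            sink.tryEmitNext(operation);
        })
        .doOnComplete(() -> sinksByPkRange.values().forEach(Sinks.Many::tryEmitComplete))
        .blockLast();
    }
}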
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) 
.doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2));
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int 
totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
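For context on the SerializedEmitFailureHandler shown in the before/after snippets above: returning true from onEmitFailure makes Sinks.Many#emitNext retry the emission, which is the usual pattern for FAIL_NON_SERIALIZED (two threads raced on a serialized-only sink). A minimal standalone sketch of the same retry behavior, assuming only reactor-core on the classpath (names are mine):

```java
import reactor.core.publisher.SignalType;
import reactor.core.publisher.Sinks;

public final class RetryOnNonSerializedDemo {

    // Returning true tells emitNext/emitComplete to retry; returning false surfaces the failure.
    // FAIL_NON_SERIALIZED only means concurrent emitters collided, so a retry is normally safe,
    // which is what the handler in the snippet above does for that result.
    static final Sinks.EmitFailureHandler RETRY_NON_SERIALIZED =
        (SignalType signalType, Sinks.EmitResult result) ->
            result == Sinks.EmitResult.FAIL_NON_SERIALIZED;

    public static void main(String[] args) {
        Sinks.Many<String> sink = Sinks.many().unicast().onBackpressureBuffer();
        sink.asFlux().subscribe(v -> System.out.println("received " + v));

        sink.emitNext("operation-1", RETRY_NON_SERIALIZED);
        sink.emitComplete(RETRY_NON_SERIALIZED);
    }
}
```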
Do we need this, given that we already subscribed on the same scheduler earlier in the chain?
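A small, self-contained Reactor sketch (not SDK code) illustrating why a second subscribeOn on the same scheduler is typically redundant for data signals: only the subscribeOn closest to the source decides which scheduler performs the subscription and the source emissions, while a later subscribeOn only moves the subscribe-time signal between operators. Whether it can actually be dropped here still depends on where the merged mainSink subscription originates, so the question above remains a fair one.

```java
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

public final class SubscribeOnDemo {
    public static void main(String[] args) {
        Flux.range(1, 3)
            // This subscribeOn decides the thread on which the source is subscribed and emits.
            .subscribeOn(Schedulers.boundedElastic())
            .doOnNext(i -> System.out.println(
                "emit " + i + " on " + Thread.currentThread().getName()))
            // Redundant for emissions here: data signals stay on the boundedElastic thread above.
            .subscribeOn(Schedulers.parallel())
            .blockLast();
    }
}
```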
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) 
.doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
.subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int 
totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> 
partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof 
CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), 
response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { 
groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
Maybe add an error-level log if we cannot retry.
public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; }
return false;
public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; }
class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override }
class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override }
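The record above asks for an error-level log whenever the emit failure will not be retried, which is what the updated handler does. As a standalone illustration of that pattern (the class name and logger setup below are my own, assuming Reactor 3 and SLF4J are on the classpath), a minimal sketch could look like this:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.SignalType;
import reactor.core.publisher.Sinks;

// Sketch of the pattern discussed in the record above: retry only when the emit failed
// because another thread was emitting concurrently (FAIL_NON_SERIALIZED), and log every
// other failure at error level because it will not be retried.
public final class RetryOnNonSerializedHandler implements Sinks.EmitFailureHandler {
    private static final Logger LOGGER = LoggerFactory.getLogger(RetryOnNonSerializedHandler.class);

    @Override
    public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) {
        if (emitResult == Sinks.EmitResult.FAIL_NON_SERIALIZED) {
            // Transient contention - returning true asks the sink to retry the emission.
            LOGGER.debug("Emit failed with {} for signal {}, retrying.", emitResult, signalType);
            return true;
        }
        // Terminal failures (overflow, cancelled, terminated, ...) are not retried.
        LOGGER.error("Emit failed with {} for signal {}, giving up.", emitResult, signalType);
        return false;
    }
}
```

Returning `true` tells the sink to retry the emission, so only the transient `FAIL_NON_SERIALIZED` contention case is retried quietly; every non-retryable result surfaces in the logs at error level.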
```suggestion Consumer<MockErrorContext> errorProcessor = mockErrorContext -> errorRaised(); ```
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorprocessor = mockErrorContext -> errorRaised(); Consumer<MockEventContext> eventPrcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(2, perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorprocessor, eventPrcessor); }
Consumer<MockErrorContext> errorprocessor = mockErrorContext -> errorRaised();
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorProcessor = mockErrorContext -> errorRaised(mockErrorContext.getThrowable()); Consumer<MockEventContext> eventProcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(perfStressOptions.getPartitions(), perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorProcessor, eventProcessor); }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } } }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; @Parameter(names = { "-pt", "--partitions" }, description = "Number of Partitions.") private int partitions = 1; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } /** * Get Maximum events per second. * @return the max events per second. */ public int getPartitions() { return partitions; } } }
```suggestion Consumer<MockEventContext> eventProcessor = mockEventContext -> eventRaised(); ```
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorprocessor = mockErrorContext -> errorRaised(); Consumer<MockEventContext> eventPrcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(2, perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorprocessor, eventPrcessor); }
Consumer<MockEventContext> eventPrcessor = mockEventContext -> eventRaised();
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorProcessor = mockErrorContext -> errorRaised(mockErrorContext.getThrowable()); Consumer<MockEventContext> eventProcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(perfStressOptions.getPartitions(), perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorProcessor, eventProcessor); }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } } }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; @Parameter(names = { "-pt", "--partitions" }, description = "Number of Partitions.") private int partitions = 1; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } /** * Get Maximum events per second. * @return the max events per second. */ public int getPartitions() { return partitions; } } }
```suggestion throw throwable; ``` I think this should align with .NET, where the `throwable` is saved and re-thrown when execution completes. https://github.com/Azure/azure-sdk-for-net/blob/main/common/Perf/Azure.Test.Perf/EventPerfTest.cs#L84
public void runAll(long endNanoTime) { startTime = System.nanoTime(); completedOps.set(0); errorRaised = false; lastCompletionNanoTime = 0; while (System.nanoTime() < endNanoTime) { if (errorRaised) { break; } } }
break;
public void runAll(long endNanoTime) { startTime = System.nanoTime(); completedOps.set(0); errorRaised = false; lastCompletionNanoTime = 0; synchronized (this) { try { wait((endNanoTime - startTime) / 1000000); } catch (InterruptedException e) { } if (errorRaised) { throw new RuntimeException(throwable); } } }
class EventPerfTest<TOptions extends PerfStressOptions> extends PerfTestBase<TOptions> { private final AtomicInteger completedOps; private volatile boolean errorRaised; private long startTime; /** * Creates an instance of performance test. * @param options the options configured for the test. * @throws IllegalStateException if SSL context cannot be created. */ public EventPerfTest(TOptions options) { super(options); if (options.getTestProxies() != null && options.getTestProxies().size() > 0) { throw new IllegalStateException("Test Proxies are not supported for Event Perf Tests."); } completedOps = new AtomicInteger(0); } /** * Indicates an event was raised, and records its count internally. */ public void eventRaised() { completedOps.getAndIncrement(); lastCompletionNanoTime = System.nanoTime() - startTime; } /** * Indicates an error was raised, and stops the performance test flow. */ public void errorRaised() { errorRaised = true; lastCompletionNanoTime = System.nanoTime() - startTime; } @Override @Override public Mono<Void> runAllAsync(long endNanoTime) { return Mono.fromCallable(() -> { runAll(endNanoTime); return Mono.empty(); }).then(); } @Override public long getCompletedOperations() { return completedOps.longValue(); } }
class EventPerfTest<TOptions extends PerfStressOptions> extends PerfTestBase<TOptions> { private final AtomicLong completedOps; private volatile boolean errorRaised; private long startTime; private Throwable throwable; /** * Creates an instance of performance test. * @param options the options configured for the test. * @throws IllegalStateException if SSL context cannot be created. */ public EventPerfTest(TOptions options) { super(options); if (options.getTestProxies() != null && options.getTestProxies().size() > 0) { throw new IllegalStateException("Test Proxies are not supported for Event Perf Tests."); } completedOps = new AtomicLong(0); } /** * Indicates an event was raised, and records its count internally. */ public void eventRaised() { completedOps.getAndIncrement(); lastCompletionNanoTime = System.nanoTime() - startTime; } /** * Indicates an error was raised, and stops the performance test flow. */ public void errorRaised(Throwable throwable) { synchronized (this) { errorRaised = true; lastCompletionNanoTime = System.nanoTime() - startTime; this.throwable = throwable; notify(); } } @Override @Override public Mono<Void> runAllAsync(long endNanoTime) { return Mono.fromCallable(() -> { runAll(endNanoTime); return Mono.empty(); }).then(); } @Override public long getCompletedOperations() { return completedOps.get(); } }
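The fix adopted in this record replaces the busy-wait loop with a `synchronized` wait/notify handshake and re-throws the saved `Throwable` once the run window ends, mirroring the linked .NET implementation. A minimal, self-contained sketch of that handshake follows; the class and method names are illustrative, not the actual perf-framework types.

```java
import java.util.concurrent.TimeUnit;

// Sketch of the wait/notify pattern: the run loop blocks until the test window elapses or
// the error callback wakes it up, and the saved Throwable is re-thrown after the wait.
public final class ErrorAwareRunner {
    private volatile boolean errorRaised;
    private Throwable throwable;

    /** Called from the event-processor error callback. */
    public synchronized void errorRaised(Throwable t) {
        this.throwable = t;
        this.errorRaised = true;
        notifyAll(); // wake up runAll() immediately instead of letting it spin
    }

    /** Blocks for the given duration unless an error arrives first, then rethrows it. */
    public synchronized void runAll(long durationNanos) throws InterruptedException {
        long deadline = System.nanoTime() + durationNanos;
        while (!errorRaised) {
            long remainingMillis = TimeUnit.NANOSECONDS.toMillis(deadline - System.nanoTime());
            if (remainingMillis <= 0) {
                return; // test window elapsed without errors
            }
            wait(remainingMillis);
        }
        throw new RuntimeException("Event processor reported a failure", throwable);
    }
}
```

Looping around `wait(...)` with a deadline also guards against spurious wakeups, which a single bare `wait((endNanoTime - startTime) / 1000000)` call does not.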
This should be in an `else` block of `if (errorAfter` above. See the .NET version: https://github.com/Azure/azure-sdk-for-net/blob/main/common/Perf/Azure.Sample.Perf/Event/MockEventProcessor.cs#L99
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } processEvent.accept(mockEventContext); eventsRaised[partition]++; } } }
int eventsSent = eventsRaised[partition];
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { processEvent.accept(mockEventContext); eventsRaised[partition]++; } } } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. */ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); Double jitterInMillis = ThreadLocalRandom.current().nextDouble() * TimeUnit.SECONDS.toMillis(0); runner.set(scheduler.get().schedule(this::processEvents, jitterInMillis.longValue(), TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { while (process) { for (int i = 0; i < partitions; i++) { process(i); } } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private volatile boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private volatile boolean processPartitions; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); this.processPartitions = true; mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. */ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; processPartitions = true; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { if (processPartitions) { Flux.range(0, partitions) .parallel() .runOn(Schedulers.boundedElastic()) .subscribe(integer -> process(integer)); processPartitions = false; } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
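This record and the next one flag the same control-flow fix: the event-sending path must sit in the `else` branch of the `errorAfter` check, and `errorLock` must be released in a `finally` block. A compact sketch of that structure is below; the `OneShotErrorLoop` class and its `step` method are illustrative, not the real `MockEventProcessor`.

```java
import java.time.Duration;
import java.util.concurrent.locks.ReentrantLock;

// Sketch of the control flow the reviewer is asking for: the error branch and the event
// branch are mutually exclusive (if/else), the one-shot error flag is double-checked under
// a lock, and the lock is always released in a finally block.
public final class OneShotErrorLoop {
    private final ReentrantLock errorLock = new ReentrantLock();
    private final Duration errorAfter;
    private final long startTime = System.nanoTime();
    private volatile boolean errorRaised;

    public OneShotErrorLoop(Duration errorAfter) {
        this.errorAfter = errorAfter;
    }

    /** One loop iteration: either raise the error once, or process the next event. */
    void step(Runnable raiseError, Runnable processEvent) {
        Duration elapsed = Duration.ofNanos(System.nanoTime() - startTime);
        if (errorAfter != null && !errorRaised && errorAfter.compareTo(elapsed) < 0) {
            errorLock.lock();
            try {
                if (!errorRaised) { // double-check under the lock so only one partition errors out
                    raiseError.run();
                    errorRaised = true;
                }
            } finally {
                errorLock.unlock(); // the original code called lock() with no matching unlock()
            }
        } else {
            processEvent.run(); // else-branch: keep sending events until the error fires
        }
    }
}
```

The double-check of `errorRaised` under the lock keeps the error callback to a single invocation even when several partition loops cross the `errorAfter` threshold at the same time.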
This should be in an `else` block of `if (errorAfter` above. See the .NET version: https://github.com/Azure/azure-sdk-for-net/blob/main/common/Perf/Azure.Sample.Perf/Event/MockEventProcessor.cs#L131
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } processEvent.accept(mockEventContext); eventsRaised[partition]++; } } }
processEvent.accept(mockEventContext);
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { processEvent.accept(mockEventContext); eventsRaised[partition]++; } } } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. */ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); Double jitterInMillis = ThreadLocalRandom.current().nextDouble() * TimeUnit.SECONDS.toMillis(0); runner.set(scheduler.get().schedule(this::processEvents, jitterInMillis.longValue(), TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { while (process) { for (int i = 0; i < partitions; i++) { process(i); } } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private volatile boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private volatile boolean processPartitions; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); this.processPartitions = true; mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. */ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; processPartitions = true; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { if (processPartitions) { Flux.range(0, partitions) .parallel() .runOn(Schedulers.boundedElastic()) .subscribe(integer -> process(integer)); processPartitions = false; } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
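Beyond the `else`/`finally` fix, the updated `context_after` in this record also changes how partitions are driven: instead of a single thread looping over every partition, each blocking `process(partition)` loop gets its own rail on the bounded-elastic scheduler. A small runnable sketch of that fan-out, assuming Reactor 3 (the `PartitionFanOut` class is my own):

```java
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

// Sketch of the fan-out used in the updated processor: one blocking process(partition)
// loop per partition, run in parallel on the bounded-elastic scheduler instead of
// iterating the partitions sequentially on one thread.
public final class PartitionFanOut {
    public static void main(String[] args) throws InterruptedException {
        int partitions = 4;
        Flux.range(0, partitions)
            .parallel()                          // split into rails, one per partition
            .runOn(Schedulers.boundedElastic())  // blocking work is acceptable here
            .subscribe(PartitionFanOut::process);
        Thread.sleep(500);                       // keep the demo JVM alive briefly
    }

    private static void process(int partition) {
        System.out.println("processing partition " + partition
            + " on " + Thread.currentThread().getName());
    }
}
```

`runOn(Schedulers.boundedElastic())` is what makes the blocking per-partition loops acceptable; running them on the parallel scheduler would tie up its small pool of worker threads.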
```suggestion
        mockEventProcessor = new MockEventProcessor(perfStressOptions.getPartitions(), perfStressOptions.getMaxEventsPerSecond(), errorAfter,
```
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) {
    super(perfStressOptions);
    Consumer<MockErrorContext> errorprocessor = mockErrorContext -> errorRaised();
    Consumer<MockEventContext> eventPrcessor = mockEventContext -> eventRaised();
    Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0
        ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null;
    mockEventProcessor = new MockEventProcessor(2, perfStressOptions.getMaxEventsPerSecond(), errorAfter,
        errorprocessor, eventPrcessor);
}
mockEventProcessor = new MockEventProcessor(2, perfStressOptions.getMaxEventsPerSecond(), errorAfter,
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) {
    super(perfStressOptions);
    Consumer<MockErrorContext> errorProcessor = mockErrorContext -> errorRaised(mockErrorContext.getThrowable());
    Consumer<MockEventContext> eventProcessor = mockEventContext -> eventRaised();
    Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0
        ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null;
    mockEventProcessor = new MockEventProcessor(perfStressOptions.getPartitions(),
        perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorProcessor, eventProcessor);
}
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> {
    private final MockEventProcessor mockEventProcessor;

    /**
     * Creates an instance of Mock Event Processor Test
     * @param perfStressOptions the options to used to configure the test.
     */
    @Override
    public Mono<Void> setupAsync() {
        return super.setupAsync().then(Mono.defer(() -> {
            mockEventProcessor.start();
            return Mono.empty();
        }));
    }

    @Override
    public Mono<Void> cleanupAsync() {
        return Mono.defer(() -> {
            mockEventProcessor.stop();
            return Mono.empty();
        }).then(super.cleanupAsync());
    }

    /**
     * Represents the perf options for Mock Event Processor Test.
     */
    public static class MockEventProcessorPerfOptions extends PerfStressOptions {

        @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.")
        private int maxEventsPerSecond = 0;

        @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.")
        private int errorAfterInSeconds = 0;

        /**
         * Get Error after duration in seconds.
         * @return the error after duration in seconds.
         */
        public int getErrorAfterInSeconds() {
            return errorAfterInSeconds;
        }

        /**
         * Get Maximum events per second.
         * @return the max events per second.
         */
        public int getMaxEventsPerSecond() {
            return maxEventsPerSecond;
        }
    }
}
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> {
    private final MockEventProcessor mockEventProcessor;

    /**
     * Creates an instance of Mock Event Processor Test
     * @param perfStressOptions the options to used to configure the test.
     */
    @Override
    public Mono<Void> setupAsync() {
        return super.setupAsync().then(Mono.defer(() -> {
            mockEventProcessor.start();
            return Mono.empty();
        }));
    }

    @Override
    public Mono<Void> cleanupAsync() {
        return Mono.defer(() -> {
            mockEventProcessor.stop();
            return Mono.empty();
        }).then(super.cleanupAsync());
    }

    /**
     * Represents the perf options for Mock Event Processor Test.
     */
    public static class MockEventProcessorPerfOptions extends PerfStressOptions {

        @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.")
        private int maxEventsPerSecond = 0;

        @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.")
        private int errorAfterInSeconds = 0;

        @Parameter(names = { "-pt", "--partitions" }, description = "Number of Partitions.")
        private int partitions = 1;

        /**
         * Get Error after duration in seconds.
         * @return the error after duration in seconds.
         */
        public int getErrorAfterInSeconds() {
            return errorAfterInSeconds;
        }

        /**
         * Get Maximum events per second.
         * @return the max events per second.
         */
        public int getMaxEventsPerSecond() {
            return maxEventsPerSecond;
        }

        /**
         * Get Maximum events per second.
         * @return the max events per second.
         */
        public int getPartitions() {
            return partitions;
        }
    }
}
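This row pairs the review suggestion with the new `-pt`/`--partitions` option: hardcoding `2` in the test constructor would silently ignore that flag. As a rough illustration of how a JCommander-annotated option like this is populated from the command line, here is a hedged sketch; the standalone `Options` class, the `OptionsParsingSketch` wrapper, and the argument value `8` are hypothetical stand-ins, not the SDK's perf framework.

```java
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;

// Hypothetical stand-in for MockEventProcessorPerfOptions, only to show flag parsing.
class Options {
    @Parameter(names = { "-pt", "--partitions" }, description = "Number of Partitions.")
    int partitions = 1;
}

public final class OptionsParsingSketch {
    public static void main(String[] args) {
        Options options = new Options();
        JCommander.newBuilder().addObject(options).build()
            .parse("-pt", "8"); // e.g. the value a perf run would pass on the CLI
        // The test should hand this value to the processor instead of a hardcoded 2.
        System.out.println("partitions = " + options.partitions);
    }
}
```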
```suggestion
                    try {
                        if (!errorRaised) {
                            processError(partition, new IllegalStateException("Test Exception"));
                            errorRaised = true;
                        }
                    } finally {
                        errorLock.unlock();
                    }
```
private void process(int partition) {
    MockEventContext mockEventContext = mockEventContexts[partition];
    if (maxEventsPerSecond > 0) {
        while (process) {
            long elapsedTime = (System.nanoTime() - startTime);
            if (errorAfter != null && !errorRaised
                && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) {
                errorLock.lock();
                if (!errorRaised) {
                    processError(partition, new IllegalStateException("Test Exception"));
                    errorRaised = true;
                }
            } else {
                int eventsSent = eventsRaised[partition];
                double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition;
                if (eventsSent < targetEventsSent) {
                    processEvent.accept(mockEventContext);
                    eventsRaised[partition]++;
                } else {
                    try {
                        Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000));
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    } else {
        while (process) {
            if (errorAfter != null && !errorRaised
                && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) {
                errorLock.lock();
                if (!errorRaised) {
                    processError(partition, new IllegalStateException("Test Exception"));
                    errorRaised = true;
                }
            } else {
                processEvent.accept(mockEventContext);
                eventsRaised[partition]++;
            }
        }
    }
}
}
private void process(int partition) {
    MockEventContext mockEventContext = mockEventContexts[partition];
    if (maxEventsPerSecond > 0) {
        while (process) {
            long elapsedTime = (System.nanoTime() - startTime);
            if (errorAfter != null && !errorRaised
                && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) {
                errorLock.lock();
                try {
                    if (!errorRaised) {
                        processError(partition, new IllegalStateException("Test Exception"));
                        errorRaised = true;
                    }
                } finally {
                    errorLock.unlock();
                }
            } else {
                int eventsSent = eventsRaised[partition];
                double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition;
                if (eventsSent < targetEventsSent) {
                    processEvent.accept(mockEventContext);
                    eventsRaised[partition]++;
                } else {
                    try {
                        Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000));
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    } else {
        while (process) {
            if (errorAfter != null && !errorRaised
                && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) {
                errorLock.lock();
                try {
                    if (!errorRaised) {
                        processError(partition, new IllegalStateException("Test Exception"));
                        errorRaised = true;
                    }
                } finally {
                    errorLock.unlock();
                }
            } else {
                processEvent.accept(mockEventContext);
                eventsRaised[partition]++;
            }
        }
    }
}
class MockEventProcessor {
    private final Consumer<MockErrorContext> processError;
    private final Consumer<MockEventContext> processEvent;
    private volatile boolean process;
    private final double maxEventsPerSecondPerPartition;
    private final int maxEventsPerSecond;
    private final int partitions;
    private final Duration errorAfter;
    private boolean errorRaised;
    private final ReentrantLock errorLock;
    private volatile boolean processPartitions;
    private final MockEventContext[] mockEventContexts;
    private int[] eventsRaised;
    private long startTime;
    private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>();
    private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>();

    /**
     * Creates an instance of a mock event processor
     *
     * @param partitions the number of partitions
     * @param maxEventsPerSecond the maximum events per second to send, optional.
     * @param errorAfter the duration after which processor should error out, optional.
     * @param processError the consumer to process the error.
     * @param processEvent the consumer to process the event.
     */
    public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter,
        Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) {
        this.processError = processError;
        this.processEvent = processEvent;
        this.partitions = partitions;
        this.maxEventsPerSecond = maxEventsPerSecond;
        this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions;
        this.errorAfter = errorAfter;
        this.errorLock = new ReentrantLock();
        this.processPartitions = true;
        mockEventContexts = new MockEventContext[partitions];
        IntStream.range(0, partitions).boxed().forEach(integer -> {
            mockEventContexts[integer] = new MockEventContext(integer, "Hello");
        });
        this.eventsRaised = new int[partitions];
    }

    /**
     * Starts the event processor.
     */
    public synchronized void start() {
        eventsRaised = new int[eventsRaised.length];
        process = true;
        errorRaised = false;
        processPartitions = true;
        startTime = System.nanoTime();
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        scheduler.set(executor);
        runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS));
    }

    private Mono<Void> processEvents() {
        if (processPartitions) {
            Flux.range(0, partitions)
                .parallel()
                .runOn(Schedulers.boundedElastic())
                .subscribe(integer -> process(integer));
            processPartitions = false;
        }
        return Mono.empty();
    }

    private void processError(int partition, Throwable throwable) {
        processError.accept(new MockErrorContext(partition, throwable));
        stop();
    }

    /**
     * Stops the Event Processor.
     */
    public synchronized void stop() {
        runner.get().cancel(true);
        scheduler.get().shutdown();
        this.process = false;
    }
}
class MockEventProcessor {
    private final Consumer<MockErrorContext> processError;
    private final Consumer<MockEventContext> processEvent;
    private volatile boolean process;
    private final double maxEventsPerSecondPerPartition;
    private final int maxEventsPerSecond;
    private final int partitions;
    private final Duration errorAfter;
    private boolean errorRaised;
    private final ReentrantLock errorLock;
    private volatile boolean processPartitions;
    private final MockEventContext[] mockEventContexts;
    private int[] eventsRaised;
    private long startTime;
    private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>();
    private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>();

    /**
     * Creates an instance of a mock event processor
     *
     * @param partitions the number of partitions
     * @param maxEventsPerSecond the maximum events per second to send, optional.
     * @param errorAfter the duration after which processor should error out, optional.
     * @param processError the consumer to process the error.
     * @param processEvent the consumer to process the event.
     */
    public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter,
        Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) {
        this.processError = processError;
        this.processEvent = processEvent;
        this.partitions = partitions;
        this.maxEventsPerSecond = maxEventsPerSecond;
        this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions;
        this.errorAfter = errorAfter;
        this.errorLock = new ReentrantLock();
        this.processPartitions = true;
        mockEventContexts = new MockEventContext[partitions];
        IntStream.range(0, partitions).boxed().forEach(integer -> {
            mockEventContexts[integer] = new MockEventContext(integer, "Hello");
        });
        this.eventsRaised = new int[partitions];
    }

    /**
     * Starts the event processor.
     */
    public synchronized void start() {
        eventsRaised = new int[eventsRaised.length];
        process = true;
        errorRaised = false;
        processPartitions = true;
        startTime = System.nanoTime();
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        scheduler.set(executor);
        runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS));
    }

    private Mono<Void> processEvents() {
        if (processPartitions) {
            Flux.range(0, partitions)
                .parallel()
                .runOn(Schedulers.boundedElastic())
                .subscribe(integer -> process(integer));
            processPartitions = false;
        }
        return Mono.empty();
    }

    private void processError(int partition, Throwable throwable) {
        processError.accept(new MockErrorContext(partition, throwable));
        stop();
    }

    /**
     * Stops the Event Processor.
     */
    public synchronized void stop() {
        runner.get().cancel(true);
        scheduler.get().shutdown();
        this.process = false;
    }
}
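The suggestion in this row applies the standard `ReentrantLock` idiom: whatever happens inside the critical section, the `finally` block guarantees `unlock()` runs, so a throwing `processError` cannot leave the lock held. Below is a minimal sketch of that pattern in isolation; the class and method names are illustrative, not taken from the PR.

```java
import java.util.concurrent.locks.ReentrantLock;

// Illustrative one-shot error reporter guarded by a ReentrantLock.
public final class LockIdiomSketch {
    private final ReentrantLock errorLock = new ReentrantLock();
    private boolean errorRaised;

    public void raiseErrorOnce(Runnable onError) {
        errorLock.lock();
        try {
            if (!errorRaised) {   // only the first caller reports the error
                errorRaised = true;
                onError.run();    // may throw; the lock is still released below
            }
        } finally {
            errorLock.unlock();   // always executed, even on exceptions
        }
    }

    public static void main(String[] args) {
        LockIdiomSketch sketch = new LockIdiomSketch();
        sketch.raiseErrorOnce(() -> System.out.println("error reported once"));
        sketch.raiseErrorOnce(() -> System.out.println("never printed"));
    }
}
```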